diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 10feacf5ef4..d552db889c7 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -1,17 +1,15 @@ -name: Build +name: Build and smoke test on: pull_request: - branches: [ master, rc/* ] - types: [opened, ready_for_review] - push: + branches: [master, rc/*] workflow_dispatch: jobs: build: strategy: matrix: - runs-on: [ubuntu-latest] + runs-on: [ubuntu-latest, macos-13, macos-13-xlarge] runs-on: ${{ matrix.runs-on }} name: Build steps: @@ -28,12 +26,23 @@ jobs: run: | go get -v -t -d ./... if [ -f Gopkg.toml ]; then - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh - dep ensure + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure fi + - name: Build run: | cd ${GITHUB_WORKSPACE}/cmd/node && go build . + cd ${GITHUB_WORKSPACE}/cmd/seednode && go build . cd ${GITHUB_WORKSPACE}/cmd/keygenerator && go build . cd ${GITHUB_WORKSPACE}/cmd/logviewer && go build . cd ${GITHUB_WORKSPACE}/cmd/termui && go build . + + # On GitHub, we only run the short tests, and we only run them for some OS/ARCH combinations. + - name: Run tests + run: | + GOOS=$(go env GOOS) + + if [[ "$GOOS" == darwin ]]; then + go test -short -v ./... + fi diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 9916e67d744..fe74d301325 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -15,7 +15,7 @@ jobs: build: strategy: matrix: - runs-on: [ubuntu-latest] # TODO add macos-latest when builds are possible on macs + runs-on: [ubuntu-latest, macos-13, macos-13-xlarge] runs-on: ${{ matrix.runs-on }} name: Build steps: @@ -45,21 +45,23 @@ jobs: GOOS=$(go env GOOS) GOARCH=$(go env GOARCH) GOPATH=$(go env GOPATH) - ARCHIVE="multiversx_""$APP_VER_SHORT""_""$GOOS""_""$GOARCH"".tgz" + ARCHIVE="multiversx_""$APP_VER_SHORT""_""$GOOS""_""$GOARCH"".zip" BUILD_DIR=${GITHUB_WORKSPACE}/build - WASM_VERSION=$(cat go.mod | grep mx-chain-vm-v | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g') - WASMER_DIR=${GOPATH}/pkg/mod/github.com/multiversx/${WASM_VERSION}/wasmer + VM_GO_VERSION=$(cat go.mod | grep mx-chain-vm-go | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g') + VM_GO_DIR=${GOPATH}/pkg/mod/github.com/multiversx/${VM_GO_VERSION} echo "GOOS=${GOOS}" >> $GITHUB_ENV echo "GOARCH=${GOARCH}" >> $GITHUB_ENV echo "ARCHIVE=${ARCHIVE}" >> $GITHUB_ENV echo "BUILD_DIR=${BUILD_DIR}" >> $GITHUB_ENV - echo "WASMER_DIR=${WASMER_DIR}" >> $GITHUB_ENV + echo "VM_GO_VERSION=${VM_GO_VERSION}" >> $GITHUB_ENV + echo "VM_GO_DIR=${VM_GO_DIR}" >> $GITHUB_ENV - name: Build run: | mkdir -p ${BUILD_DIR} cd ${GITHUB_WORKSPACE}/cmd/node && go build -o "${BUILD_DIR}/node" -a -ldflags="-X main.appVersion=${APP_VER}" + cd ${GITHUB_WORKSPACE}/cmd/seednode && go build -o "${BUILD_DIR}/seednode" -a -ldflags="-X main.appVersion=${APP_VER}" cd ${GITHUB_WORKSPACE}/cmd/keygenerator && go build -o "${BUILD_DIR}/keygenerator" -a -ldflags="-X main.appVersion=${APP_VER}" cd ${GITHUB_WORKSPACE}/cmd/logviewer && go build -o "${BUILD_DIR}/logviewer" -a -ldflags="-X main.appVersion=${APP_VER}" cd ${GITHUB_WORKSPACE}/cmd/termui && go build -o "${BUILD_DIR}/termui" -a -ldflags="-X main.appVersion=${APP_VER}" @@ -69,24 +71,68 @@ jobs: cd ${GITHUB_WORKSPACE} if [[ "$GOOS" == linux && "$GOARCH" == amd64 ]]; then - cp -f ${WASMER_DIR}/libwasmer_linux_amd64.so 
${BUILD_DIR}; + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer2/libvmexeccapi.so ${BUILD_DIR}/libvmexeccapi.so + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer/libwasmer_linux_amd64.so ${BUILD_DIR}/libwasmer_linux_amd64.so fi + + # Actually, there's no runner for this combination (as of March 2024). if [[ "$GOOS" == linux && "$GOARCH" == arm64 ]]; then - cp -f ${WASMER_DIR}/libwasmer_linux_arm64.so ${BUILD_DIR}; + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer2/libvmexeccapi_arm.so ${BUILD_DIR}/libvmexeccapi_arm.so + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer/libwasmer_linux_arm64_shim.so ${BUILD_DIR}/libwasmer_linux_arm64_shim.so fi + if [[ "$GOOS" == darwin && "$GOARCH" == amd64 ]]; then - cp -f ${WASMER_DIR}/libwasmer_darwin_amd64.dylib ${BUILD_DIR}; + cp -v ${VM_GO_DIR}/wasmer2/libvmexeccapi.dylib ${BUILD_DIR}/libvmexeccapi.dylib + cp -v ${VM_GO_DIR}/wasmer/libwasmer_darwin_amd64.dylib ${BUILD_DIR}/libwasmer_darwin_amd64.dylib + fi + + if [[ "$GOOS" == darwin && "$GOARCH" == arm64 ]]; then + cp -v ${VM_GO_DIR}/wasmer2/libvmexeccapi_arm.dylib ${BUILD_DIR}/libvmexeccapi_arm.dylib + cp -v ${VM_GO_DIR}/wasmer/libwasmer_darwin_arm64_shim.dylib ${BUILD_DIR}/libwasmer_darwin_arm64_shim.dylib fi - cd ${BUILD_DIR} - tar czvf "${GITHUB_WORKSPACE}/${ARCHIVE}" * - stat ${GITHUB_WORKSPACE}/${ARCHIVE} + if [[ "$GOOS" == linux ]]; then + patchelf --set-rpath "\$ORIGIN" ${BUILD_DIR}/node + patchelf --set-rpath "\$ORIGIN" ${BUILD_DIR}/seednode + + ldd ${BUILD_DIR}/node + ldd ${BUILD_DIR}/seednode + fi + + if [[ "$GOOS" == darwin ]]; then + install_name_tool -add_rpath "@loader_path" ${BUILD_DIR}/node + install_name_tool -add_rpath "@loader_path" ${BUILD_DIR}/seednode + + otool -L ${BUILD_DIR}/node + otool -L ${BUILD_DIR}/seednode + fi + + - name: Smoke test + run: | + # Remove all downloaded Go packages, so that we can test the binary's independence from them (think of Wasmer libraries). + sudo rm -rf ${GOPATH}/pkg/mod + + # Test binaries in different current directories. 
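Note on the rpath step above: setting the runtime search path to `$ORIGIN` (Linux) or `@loader_path` (macOS) lets `node` and `seednode` load the VM/Wasmer shared libraries from their own directory, which is what the smoke test relies on once the Go module cache is removed. A minimal local check — a sketch only, assuming a Linux build already placed in `./build` and `patchelf` available:

```bash
BUILD_DIR="$PWD/build"
patchelf --print-rpath "${BUILD_DIR}/node"   # expect: $ORIGIN
ldd "${BUILD_DIR}/node"                      # libvmexeccapi / libwasmer should resolve next to the binary
cd /tmp && "${BUILD_DIR}/node" --version     # binary works regardless of the current directory
```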
+ cd ${BUILD_DIR} && ./node --version + cd ${GITHUB_WORKSPACE} && ${BUILD_DIR}/node --version + cd / && ${BUILD_DIR}/node --version + + cd ${BUILD_DIR} && ./seednode --version + cd ${GITHUB_WORKSPACE} && ${BUILD_DIR}/seednode --version + cd / && ${BUILD_DIR}/seednode --version + + - name: Package build output + run: | + sudo chown -R $USER: ${BUILD_DIR} + chmod -R 755 ${BUILD_DIR} + ls -al ${BUILD_DIR} + zip -r -j ${ARCHIVE} ${BUILD_DIR} - name: Save artifacts uses: actions/upload-artifact@v3 with: name: ${{ env.ARCHIVE }} - path: ${{ github.workspace }}/${{ env.ARCHIVE }} + path: ${{ env.ARCHIVE }} if-no-files-found: error release: @@ -113,6 +159,6 @@ jobs: run: | gh release create --draft --notes="Release draft from Github Actions" vNext sleep 10 - for i in $(find ./assets -name '*.tgz' -type f); do + for i in $(find ./assets -name '*.zip' -type f); do gh release upload vNext ${i} done diff --git a/.github/workflows/docker-keygenerator.yaml b/.github/workflows/docker-keygenerator.yaml new file mode 100644 index 00000000000..5e4d9d44a32 --- /dev/null +++ b/.github/workflows/docker-keygenerator.yaml @@ -0,0 +1,36 @@ +name: Build & push keygenerator docker image + +on: + workflow_dispatch: + pull_request: + +jobs: + build-docker-image: + strategy: + matrix: + runs-on: [ubuntu-latest] + runs-on: ${{ matrix.runs-on }} + + steps: + - name: Check out code into the Go module directory + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log into Docker Hub + if: github.event_name != 'pull_request' + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and push image to Docker Hub + id: push + uses: docker/build-push-action@v6 + with: + context: . 
+ file: ./docker/keygenerator/Dockerfile + platforms: linux/amd64,linux/arm64 + push: ${{ github.event_name != 'pull_request' }} + tags: multiversx/chain-keygenerator:latest diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 611fadc3d08..1cc46af26c8 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -5,6 +5,7 @@ on: - master pull_request: branches: [ master, feat/*, rc/* ] + workflow_dispatch: permissions: contents: read diff --git a/api/errors/errors.go b/api/errors/errors.go index b01cec657ca..30cfb923bbd 100644 --- a/api/errors/errors.go +++ b/api/errors/errors.go @@ -174,3 +174,6 @@ var ErrGetWaitingManagedKeys = errors.New("error getting the waiting managed key // ErrGetWaitingEpochsLeftForPublicKey signals that an error occurred while getting the waiting epochs left for public key var ErrGetWaitingEpochsLeftForPublicKey = errors.New("error getting the waiting epochs left for public key") + +// ErrRecursiveRelayedTxIsNotAllowed signals that recursive relayed tx is not allowed +var ErrRecursiveRelayedTxIsNotAllowed = errors.New("recursive relayed tx is not allowed") diff --git a/api/groups/addressGroup.go b/api/groups/addressGroup.go index 1866c3bf022..151b7f53372 100644 --- a/api/groups/addressGroup.go +++ b/api/groups/addressGroup.go @@ -38,6 +38,7 @@ const ( urlParamBlockHash = "blockHash" urlParamBlockRootHash = "blockRootHash" urlParamHintEpoch = "hintEpoch" + urlParamWithKeys = "withKeys" ) // addressFacadeHandler defines the methods to be implemented by a facade for handling address requests @@ -71,9 +72,11 @@ type esdtTokenData struct { Properties string `json:"properties"` } -type esdtNFTTokenData struct { +// ESDTNFTTokenData defines the exposed nft token data structure +type ESDTNFTTokenData struct { TokenIdentifier string `json:"tokenIdentifier"` Balance string `json:"balance"` + Type string `json:"type"` Properties string `json:"properties,omitempty"` Name string `json:"name,omitempty"` Nonce uint64 `json:"nonce,omitempty"` @@ -185,6 +188,14 @@ func (ag *addressGroup) getAccount(c *gin.Context) { return } + withKeys, err := parseBoolUrlParam(c, urlParamWithKeys) + if err != nil { + shared.RespondWithValidationError(c, errors.ErrCouldNotGetAccount, err) + return + } + + options.WithKeys = withKeys + accountResponse, blockInfo, err := ag.getFacade().GetAccount(addr, options) if err != nil { shared.RespondWithInternalError(c, errors.ErrCouldNotGetAccount, err) @@ -439,7 +450,7 @@ func (ag *addressGroup) getAllESDTData(c *gin.Context) { return } - formattedTokens := make(map[string]*esdtNFTTokenData) + formattedTokens := make(map[string]*ESDTNFTTokenData) for tokenID, esdtData := range tokens { tokenData := buildTokenDataApiResponse(tokenID, esdtData) @@ -472,12 +483,14 @@ func (ag *addressGroup) isDataTrieMigrated(c *gin.Context) { shared.RespondWithSuccess(c, gin.H{"isMigrated": isMigrated}) } -func buildTokenDataApiResponse(tokenIdentifier string, esdtData *esdt.ESDigitalToken) *esdtNFTTokenData { - tokenData := &esdtNFTTokenData{ +func buildTokenDataApiResponse(tokenIdentifier string, esdtData *esdt.ESDigitalToken) *ESDTNFTTokenData { + tokenData := &ESDTNFTTokenData{ TokenIdentifier: tokenIdentifier, Balance: esdtData.Value.String(), Properties: hex.EncodeToString(esdtData.Properties), } + + tokenType := core.ESDTType(esdtData.Type).String() if esdtData.TokenMetaData != nil { tokenData.Name = string(esdtData.TokenMetaData.Name) tokenData.Nonce = esdtData.TokenMetaData.Nonce @@ -486,11 
+499,25 @@ func buildTokenDataApiResponse(tokenIdentifier string, esdtData *esdt.ESDigitalT tokenData.Hash = esdtData.TokenMetaData.Hash tokenData.URIs = esdtData.TokenMetaData.URIs tokenData.Attributes = esdtData.TokenMetaData.Attributes + + tokenType = getTokenType(esdtData.GetType(), tokenData.Nonce) } + tokenData.Type = tokenType + return tokenData } +func getTokenType(tokenType uint32, tokenNonce uint64) string { + isNotFungible := tokenNonce != 0 + tokenTypeNotSet := isNotFungible && core.ESDTType(tokenType) == core.NonFungible + if tokenTypeNotSet { + return "" + } + + return core.ESDTType(tokenType).String() +} + func (ag *addressGroup) getFacade() addressFacadeHandler { ag.mutFacade.RLock() defer ag.mutFacade.RUnlock() diff --git a/api/groups/nodeGroup.go b/api/groups/nodeGroup.go index fd61f481c39..e7025c033d9 100644 --- a/api/groups/nodeGroup.go +++ b/api/groups/nodeGroup.go @@ -28,6 +28,7 @@ const ( bootstrapStatusPath = "/bootstrapstatus" connectedPeersRatingsPath = "/connected-peers-ratings" managedKeys = "/managed-keys" + loadedKeys = "/loaded-keys" managedKeysCount = "/managed-keys/count" eligibleManagedKeys = "/managed-keys/eligible" waitingManagedKeys = "/managed-keys/waiting" @@ -44,6 +45,7 @@ type nodeFacadeHandler interface { GetConnectedPeersRatingsOnMainNetwork() (string, error) GetManagedKeysCount() int GetManagedKeys() []string + GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) @@ -129,6 +131,11 @@ func NewNodeGroup(facade nodeFacadeHandler) (*nodeGroup, error) { Method: http.MethodGet, Handler: ng.managedKeys, }, + { + Path: loadedKeys, + Method: http.MethodGet, + Handler: ng.loadedKeys, + }, { Path: eligibleManagedKeys, Method: http.MethodGet, @@ -411,6 +418,19 @@ func (ng *nodeGroup) managedKeys(c *gin.Context) { ) } +// loadedKeys returns all keys loaded by the current node +func (ng *nodeGroup) loadedKeys(c *gin.Context) { + keys := ng.getFacade().GetLoadedKeys() + c.JSON( + http.StatusOK, + shared.GenericAPIResponse{ + Data: gin.H{"loadedKeys": keys}, + Error: "", + Code: shared.ReturnCodeSuccess, + }, + ) +} + // managedKeysEligible returns the node's eligible managed keys func (ng *nodeGroup) managedKeysEligible(c *gin.Context) { keys, err := ng.getFacade().GetEligibleManagedKeys() diff --git a/api/groups/nodeGroup_test.go b/api/groups/nodeGroup_test.go index 6aa00d91693..4bc6e6c738e 100644 --- a/api/groups/nodeGroup_test.go +++ b/api/groups/nodeGroup_test.go @@ -81,6 +81,13 @@ type managedKeysResponse struct { generalResponse } +type loadedKeysResponse struct { + Data struct { + LoadedKeys []string `json:"loadedKeys"` + } `json:"data"` + generalResponse +} + type managedEligibleKeysResponse struct { Data struct { Keys []string `json:"eligibleKeys"` @@ -764,6 +771,36 @@ func TestNodeGroup_ManagedKeys(t *testing.T) { assert.Equal(t, providedKeys, response.Data.ManagedKeys) } +func TestNodeGroup_LoadedKeys(t *testing.T) { + t.Parallel() + + providedKeys := []string{ + "pk1", + "pk2", + } + facade := mock.FacadeStub{ + GetLoadedKeysCalled: func() []string { + return providedKeys + }, + } + + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", "/node/loaded-keys", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := &loadedKeysResponse{} + loadResponse(resp.Body, response) + + 
assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, "", response.Error) + assert.Equal(t, providedKeys, response.Data.LoadedKeys) +} + func TestNodeGroup_ManagedKeysEligible(t *testing.T) { t.Parallel() @@ -1046,6 +1083,7 @@ func getNodeRoutesConfig() config.ApiRoutesConfig { {Name: "/connected-peers-ratings", Open: true}, {Name: "/managed-keys/count", Open: true}, {Name: "/managed-keys", Open: true}, + {Name: "/loaded-keys", Open: true}, {Name: "/managed-keys/eligible", Open: true}, {Name: "/managed-keys/waiting", Open: true}, {Name: "/waiting-epochs-left/:key", Open: true}, diff --git a/api/groups/transactionGroup.go b/api/groups/transactionGroup.go index c2b47bf7a87..29d07de2640 100644 --- a/api/groups/transactionGroup.go +++ b/api/groups/transactionGroup.go @@ -182,27 +182,23 @@ func (tg *transactionGroup) simulateTransaction(c *gin.Context) { return } - txArgs := &external.ArgsCreateTransaction{ - Nonce: ftx.Nonce, - Value: ftx.Value, - Receiver: ftx.Receiver, - ReceiverUsername: ftx.ReceiverUsername, - Sender: ftx.Sender, - SenderUsername: ftx.SenderUsername, - GasPrice: ftx.GasPrice, - GasLimit: ftx.GasLimit, - DataField: ftx.Data, - SignatureHex: ftx.Signature, - ChainID: ftx.ChainID, - Version: ftx.Version, - Options: ftx.Options, - Guardian: ftx.GuardianAddr, - GuardianSigHex: ftx.GuardianSignature, + innerTxs, err := tg.extractInnerTransactions(ftx.InnerTransactions) + if err != nil { + c.JSON( + http.StatusBadRequest, + shared.GenericAPIResponse{ + Data: nil, + Error: fmt.Sprintf("%s: %s", errors.ErrTxGenerationFailed.Error(), err.Error()), + Code: shared.ReturnCodeRequestError, + }, + ) + return } - start := time.Now() - tx, txHash, err := tg.getFacade().CreateTransaction(txArgs) - logging.LogAPIActionDurationIfNeeded(start, "API call: CreateTransaction") + if len(innerTxs) == 0 { + innerTxs = nil + } + tx, txHash, err := tg.createTransaction(&ftx, innerTxs) if err != nil { c.JSON( http.StatusBadRequest, @@ -215,7 +211,7 @@ func (tg *transactionGroup) simulateTransaction(c *gin.Context) { return } - start = time.Now() + start := time.Now() err = tg.getFacade().ValidateTransactionForSimulation(tx, checkSignature) logging.LogAPIActionDurationIfNeeded(start, "API call: ValidateTransactionForSimulation") if err != nil { @@ -272,26 +268,23 @@ func (tg *transactionGroup) sendTransaction(c *gin.Context) { return } - txArgs := &external.ArgsCreateTransaction{ - Nonce: ftx.Nonce, - Value: ftx.Value, - Receiver: ftx.Receiver, - ReceiverUsername: ftx.ReceiverUsername, - Sender: ftx.Sender, - SenderUsername: ftx.SenderUsername, - GasPrice: ftx.GasPrice, - GasLimit: ftx.GasLimit, - DataField: ftx.Data, - SignatureHex: ftx.Signature, - ChainID: ftx.ChainID, - Version: ftx.Version, - Options: ftx.Options, - Guardian: ftx.GuardianAddr, - GuardianSigHex: ftx.GuardianSignature, + innerTxs, err := tg.extractInnerTransactions(ftx.InnerTransactions) + if err != nil { + c.JSON( + http.StatusBadRequest, + shared.GenericAPIResponse{ + Data: nil, + Error: fmt.Sprintf("%s: %s", errors.ErrTxGenerationFailed.Error(), err.Error()), + Code: shared.ReturnCodeRequestError, + }, + ) + return } - start := time.Now() - tx, txHash, err := tg.getFacade().CreateTransaction(txArgs) - logging.LogAPIActionDurationIfNeeded(start, "API call: CreateTransaction") + + if len(innerTxs) == 0 { + innerTxs = nil + } + tx, txHash, err := tg.createTransaction(&ftx, innerTxs) if err != nil { c.JSON( http.StatusBadRequest, @@ -304,7 +297,7 @@ func (tg *transactionGroup) sendTransaction(c *gin.Context) { return } - start = 
time.Now() + start := time.Now() err = tg.getFacade().ValidateTransaction(tx) logging.LogAPIActionDurationIfNeeded(start, "API call: ValidateTransaction") if err != nil { @@ -370,25 +363,23 @@ func (tg *transactionGroup) sendMultipleTransactions(c *gin.Context) { var start time.Time txsHashes := make(map[int]string) for idx, receivedTx := range ftxs { - txArgs := &external.ArgsCreateTransaction{ - Nonce: receivedTx.Nonce, - Value: receivedTx.Value, - Receiver: receivedTx.Receiver, - ReceiverUsername: receivedTx.ReceiverUsername, - Sender: receivedTx.Sender, - SenderUsername: receivedTx.SenderUsername, - GasPrice: receivedTx.GasPrice, - GasLimit: receivedTx.GasLimit, - DataField: receivedTx.Data, - SignatureHex: receivedTx.Signature, - ChainID: receivedTx.ChainID, - Version: receivedTx.Version, - Options: receivedTx.Options, - Guardian: receivedTx.GuardianAddr, - GuardianSigHex: receivedTx.GuardianSignature, + innerTxs, errExtractInnerTransactions := tg.extractInnerTransactions(receivedTx.InnerTransactions) + if errExtractInnerTransactions != nil { + c.JSON( + http.StatusBadRequest, + shared.GenericAPIResponse{ + Data: nil, + Error: fmt.Sprintf("%s: %s", errors.ErrTxGenerationFailed.Error(), err.Error()), + Code: shared.ReturnCodeInternalError, + }, + ) + return } - tx, txHash, err = tg.getFacade().CreateTransaction(txArgs) - logging.LogAPIActionDurationIfNeeded(start, "API call: CreateTransaction") + + if len(innerTxs) == 0 { + innerTxs = nil + } + tx, txHash, err = tg.createTransaction(&receivedTx, innerTxs) if err != nil { continue } @@ -499,26 +490,23 @@ func (tg *transactionGroup) computeTransactionGasLimit(c *gin.Context) { return } - txArgs := &external.ArgsCreateTransaction{ - Nonce: ftx.Nonce, - Value: ftx.Value, - Receiver: ftx.Receiver, - ReceiverUsername: ftx.ReceiverUsername, - Sender: ftx.Sender, - SenderUsername: ftx.SenderUsername, - GasPrice: ftx.GasPrice, - GasLimit: ftx.GasLimit, - DataField: ftx.Data, - SignatureHex: ftx.Signature, - ChainID: ftx.ChainID, - Version: ftx.Version, - Options: ftx.Options, - Guardian: ftx.GuardianAddr, - GuardianSigHex: ftx.GuardianSignature, + innerTxs, errExtractInnerTransactions := tg.extractInnerTransactions(ftx.InnerTransactions) + if errExtractInnerTransactions != nil { + c.JSON( + http.StatusBadRequest, + shared.GenericAPIResponse{ + Data: nil, + Error: fmt.Sprintf("%s: %s", errors.ErrTxGenerationFailed.Error(), errExtractInnerTransactions.Error()), + Code: shared.ReturnCodeInternalError, + }, + ) + return } - start := time.Now() - tx, _, err := tg.getFacade().CreateTransaction(txArgs) - logging.LogAPIActionDurationIfNeeded(start, "API call: CreateTransaction") + + if len(innerTxs) == 0 { + innerTxs = nil + } + tx, _, err := tg.createTransaction(&ftx, innerTxs) if err != nil { c.JSON( http.StatusInternalServerError, @@ -531,7 +519,7 @@ func (tg *transactionGroup) computeTransactionGasLimit(c *gin.Context) { return } - start = time.Now() + start := time.Now() cost, err := tg.getFacade().ComputeTransactionGasLimit(tx) logging.LogAPIActionDurationIfNeeded(start, "API call: ComputeTransactionGasLimit") if err != nil { @@ -728,6 +716,33 @@ func (tg *transactionGroup) getTransactionsPoolNonceGapsForSender(sender string, ) } +func (tg *transactionGroup) createTransaction(receivedTx *transaction.FrontendTransaction, innerTxs []*transaction.Transaction) (*transaction.Transaction, []byte, error) { + txArgs := &external.ArgsCreateTransaction{ + Nonce: receivedTx.Nonce, + Value: receivedTx.Value, + Receiver: receivedTx.Receiver, + 
ReceiverUsername: receivedTx.ReceiverUsername, + Sender: receivedTx.Sender, + SenderUsername: receivedTx.SenderUsername, + GasPrice: receivedTx.GasPrice, + GasLimit: receivedTx.GasLimit, + DataField: receivedTx.Data, + SignatureHex: receivedTx.Signature, + ChainID: receivedTx.ChainID, + Version: receivedTx.Version, + Options: receivedTx.Options, + Guardian: receivedTx.GuardianAddr, + GuardianSigHex: receivedTx.GuardianSignature, + Relayer: receivedTx.Relayer, + InnerTransactions: innerTxs, + } + start := time.Now() + tx, txHash, err := tg.getFacade().CreateTransaction(txArgs) + logging.LogAPIActionDurationIfNeeded(start, "API call: CreateTransaction") + + return tx, txHash, err +} + func validateQuery(sender, fields string, lastNonce, nonceGaps bool) error { if fields != "" && lastNonce { return errors.ErrFetchingLatestNonceCannotIncludeFields @@ -745,6 +760,10 @@ func validateQuery(sender, fields string, lastNonce, nonceGaps bool) error { return errors.ErrEmptySenderToGetNonceGaps } + if fields == "*" { + return nil + } + if fields != "" { return validateFields(fields) } @@ -821,6 +840,28 @@ func (tg *transactionGroup) getFacade() transactionFacadeHandler { return tg.facade } +func (tg *transactionGroup) extractInnerTransactions( + innerTransactions []*transaction.FrontendTransaction, +) ([]*transaction.Transaction, error) { + innerTxs := make([]*transaction.Transaction, 0, len(innerTransactions)) + if len(innerTransactions) != 0 { + for _, innerTx := range innerTransactions { + if len(innerTx.InnerTransactions) != 0 { + return innerTxs, errors.ErrRecursiveRelayedTxIsNotAllowed + } + + newInnerTx, _, err := tg.createTransaction(innerTx, nil) + if err != nil { + return innerTxs, err + } + + innerTxs = append(innerTxs, newInnerTx) + } + } + + return innerTxs, nil +} + // UpdateFacade will update the facade func (tg *transactionGroup) UpdateFacade(newFacade interface{}) error { if newFacade == nil { diff --git a/api/groups/transactionGroup_test.go b/api/groups/transactionGroup_test.go index 1f8f6bffbd4..f183dd30b4c 100644 --- a/api/groups/transactionGroup_test.go +++ b/api/groups/transactionGroup_test.go @@ -312,6 +312,7 @@ func TestTransactionGroup_sendTransaction(t *testing.T) { expectedErr, ) }) + t.Run("recursive relayed v3 should error", testRecursiveRelayedV3("/transaction/send")) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -520,6 +521,7 @@ func TestTransactionGroup_computeTransactionGasLimit(t *testing.T) { expectedErr, ) }) + t.Run("recursive relayed v3 should error", testRecursiveRelayedV3("/transaction/cost")) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -640,6 +642,7 @@ func TestTransactionGroup_simulateTransaction(t *testing.T) { expectedErr, ) }) + t.Run("recursive relayed v3 should error", testRecursiveRelayedV3("/transaction/simulate")) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -704,6 +707,7 @@ func TestTransactionGroup_getTransactionsPool(t *testing.T) { t.Run("fields + nonce gaps", testTxPoolWithInvalidQuery("?fields=sender,receiver&nonce-gaps=true", apiErrors.ErrFetchingNonceGapsCannotIncludeFields)) t.Run("fields has spaces", testTxPoolWithInvalidQuery("?fields=sender ,receiver", apiErrors.ErrInvalidFields)) t.Run("fields has numbers", testTxPoolWithInvalidQuery("?fields=sender1", apiErrors.ErrInvalidFields)) + t.Run("fields + wild card", testTxPoolWithInvalidQuery("?fields=sender,receiver,*", apiErrors.ErrInvalidFields)) t.Run("GetTransactionsPool error should error", func(t *testing.T) { t.Parallel() @@ -816,8 +820,7 @@ func 
TestTransactionGroup_getTransactionsPool(t *testing.T) { t.Parallel() expectedSender := "sender" - providedFields := "sender,receiver" - query := "?by-sender=" + expectedSender + "&fields=" + providedFields + query := "?by-sender=" + expectedSender + "&fields=*" expectedResp := &common.TransactionsPoolForSenderApiResponse{ Transactions: []common.Transaction{ { @@ -1127,3 +1130,62 @@ func getTransactionRoutesConfig() config.ApiRoutesConfig { }, } } + +func testRecursiveRelayedV3(url string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + facade := &mock.FacadeStub{ + CreateTransactionHandler: func(txArgs *external.ArgsCreateTransaction) (*dataTx.Transaction, []byte, error) { + txHash, _ := hex.DecodeString(hexTxHash) + return nil, txHash, nil + }, + SendBulkTransactionsHandler: func(txs []*dataTx.Transaction) (u uint64, err error) { + return 1, nil + }, + ValidateTransactionHandler: func(tx *dataTx.Transaction) error { + return nil + }, + } + + userTx1 := fmt.Sprintf(`{"nonce": %d, "sender":"%s", "receiver":"%s", "value":"%s", "signature":"%s"}`, + nonce, + sender, + receiver, + value, + signature, + ) + userTx2 := fmt.Sprintf(`{"nonce": %d, "sender":"%s", "receiver":"%s", "value":"%s", "signature":"%s", "innerTransactions":[%s]}`, + nonce, + sender, + receiver, + value, + signature, + userTx1, + ) + tx := fmt.Sprintf(`{"nonce": %d, "sender":"%s", "receiver":"%s", "value":"%s", "signature":"%s", "innerTransactions":[%s]}`, + nonce, + sender, + receiver, + value, + signature, + userTx2, + ) + + transactionGroup, err := groups.NewTransactionGroup(facade) + require.NoError(t, err) + + ws := startWebServer(transactionGroup, "transaction", getTransactionRoutesConfig()) + + req, _ := http.NewRequest("POST", url, bytes.NewBuffer([]byte(tx))) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + txResp := shared.GenericAPIResponse{} + loadResponse(resp.Body, &txResp) + + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.True(t, strings.Contains(txResp.Error, apiErrors.ErrRecursiveRelayedTxIsNotAllowed.Error())) + assert.Empty(t, txResp.Data) + } +} diff --git a/api/groups/validatorGroup.go b/api/groups/validatorGroup.go index f2c206b34f3..1120ae4186d 100644 --- a/api/groups/validatorGroup.go +++ b/api/groups/validatorGroup.go @@ -10,13 +10,18 @@ import ( "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/common" ) -const statisticsPath = "/statistics" +const ( + statisticsPath = "/statistics" + auctionPath = "/auction" +) // validatorFacadeHandler defines the methods to be implemented by a facade for validator requests type validatorFacadeHandler interface { ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) IsInterfaceNil() bool } @@ -43,6 +48,11 @@ func NewValidatorGroup(facade validatorFacadeHandler) (*validatorGroup, error) { Method: http.MethodGet, Handler: ng.statistics, }, + { + Path: auctionPath, + Method: http.MethodGet, + Handler: ng.auction, + }, } ng.endpoints = endpoints @@ -74,6 +84,31 @@ func (vg *validatorGroup) statistics(c *gin.Context) { ) } +// auction will return the list of the validators in the auction list +func (vg *validatorGroup) auction(c *gin.Context) { + valStats, err := vg.getFacade().AuctionListApi() + if err != nil { + c.JSON( + 
http.StatusBadRequest, + shared.GenericAPIResponse{ + Data: nil, + Error: err.Error(), + Code: shared.ReturnCodeRequestError, + }, + ) + return + } + + c.JSON( + http.StatusOK, + shared.GenericAPIResponse{ + Data: gin.H{"auctionList": valStats}, + Error: "", + Code: shared.ReturnCodeSuccess, + }, + ) +} + func (vg *validatorGroup) getFacade() validatorFacadeHandler { vg.mutFacade.RLock() defer vg.mutFacade.RUnlock() diff --git a/api/groups/validatorGroup_test.go b/api/groups/validatorGroup_test.go index 0bb20a869cd..0bbd1ebf742 100644 --- a/api/groups/validatorGroup_test.go +++ b/api/groups/validatorGroup_test.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-go/api/groups" "github.com/multiversx/mx-chain-go/api/mock" "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,11 +35,18 @@ func TestNewValidatorGroup(t *testing.T) { } // ValidatorStatisticsResponse is the response for the validator statistics endpoint. -type ValidatorStatisticsResponse struct { +type validatorStatisticsResponse struct { Result map[string]*validator.ValidatorStatistics `json:"statistics"` Error string `json:"error"` } +type auctionListResponse struct { + Data struct { + Result []*common.AuctionListValidatorAPIResponse `json:"auctionList"` + } `json:"data"` + Error string +} + func TestValidatorStatistics_ErrorWhenFacadeFails(t *testing.T) { t.Parallel() @@ -60,7 +68,7 @@ func TestValidatorStatistics_ErrorWhenFacadeFails(t *testing.T) { resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - response := ValidatorStatisticsResponse{} + response := validatorStatisticsResponse{} loadResponse(resp.Body, &response) assert.Equal(t, http.StatusBadRequest, resp.Code) @@ -97,7 +105,7 @@ func TestValidatorStatistics_ReturnsSuccessfully(t *testing.T) { response := shared.GenericAPIResponse{} loadResponse(resp.Body, &response) - validatorStatistics := ValidatorStatisticsResponse{} + validatorStatistics := validatorStatisticsResponse{} mapResponseData := response.Data.(map[string]interface{}) mapResponseDataBytes, _ := json.Marshal(mapResponseData) _ = json.Unmarshal(mapResponseDataBytes, &validatorStatistics) @@ -147,14 +155,13 @@ func TestValidatorGroup_UpdateFacade(t *testing.T) { require.NoError(t, err) ws := startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) - req, _ := http.NewRequest("GET", "/validator/statistics", nil) resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) response := shared.GenericAPIResponse{} loadResponse(resp.Body, &response) - validatorStatistics := ValidatorStatisticsResponse{} + validatorStatistics := validatorStatisticsResponse{} mapResponseData := response.Data.(map[string]interface{}) mapResponseDataBytes, _ := json.Marshal(mapResponseData) _ = json.Unmarshal(mapResponseDataBytes, &validatorStatistics) @@ -191,12 +198,71 @@ func TestValidatorGroup_IsInterfaceNil(t *testing.T) { require.False(t, validatorGroup.IsInterfaceNil()) } +func TestAuctionList_ErrorWhenFacadeFails(t *testing.T) { + t.Parallel() + + errStr := "error in facade" + facade := mock.FacadeStub{ + AuctionListHandler: func() ([]*common.AuctionListValidatorAPIResponse, error) { + return nil, errors.New(errStr) + }, + } + + validatorGroup, err := groups.NewValidatorGroup(&facade) + require.NoError(t, err) + + ws := 
startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) + req, _ := http.NewRequest("GET", "/validator/auction", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := auctionListResponse{} + loadResponse(resp.Body, &response) + + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.Contains(t, response.Error, errStr) +} + +func TestAuctionList_ReturnsSuccessfully(t *testing.T) { + t.Parallel() + + auctionListToReturn := []*common.AuctionListValidatorAPIResponse{ + { + Owner: "owner", + NumStakedNodes: 4, + TotalTopUp: "1234", + TopUpPerNode: "4321", + QualifiedTopUp: "4444", + }, + } + facade := mock.FacadeStub{ + AuctionListHandler: func() ([]*common.AuctionListValidatorAPIResponse, error) { + return auctionListToReturn, nil + }, + } + + validatorGroup, err := groups.NewValidatorGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) + req, _ := http.NewRequest("GET", "/validator/auction", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := auctionListResponse{} + loadResponse(resp.Body, &response) + + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, response.Data.Result, auctionListToReturn) +} + func getValidatorRoutesConfig() config.ApiRoutesConfig { return config.ApiRoutesConfig{ APIPackages: map[string]config.APIPackageConfig{ "validator": { Routes: []config.RouteConfig{ {Name: "/statistics", Open: true}, + {Name: "/auction", Open: true}, }, }, }, diff --git a/api/mock/facadeStub.go b/api/mock/facadeStub.go index 50572622897..e40645c1ac3 100644 --- a/api/mock/facadeStub.go +++ b/api/mock/facadeStub.go @@ -91,10 +91,12 @@ type FacadeStub struct { IsDataTrieMigratedCalled func(address string, options api.AccountQueryOptions) (bool, error) GetManagedKeysCountCalled func() int GetManagedKeysCalled func() []string + GetLoadedKeysCalled func() []string GetEligibleManagedKeysCalled func() ([]string, error) GetWaitingManagedKeysCalled func() ([]string, error) GetWaitingEpochsLeftForPublicKeyCalled func(publicKey string) (uint32, error) P2PPrometheusMetricsEnabledCalled func() bool + AuctionListHandler func() ([]*common.AuctionListValidatorAPIResponse, error) } // GetTokenSupply - @@ -195,12 +197,20 @@ func (f *FacadeStub) PprofEnabled() bool { // GetHeartbeats returns the slice of heartbeat info func (f *FacadeStub) GetHeartbeats() ([]data.PubKeyHeartbeat, error) { - return f.GetHeartbeatsHandler() + if f.GetHeartbeatsHandler != nil { + return f.GetHeartbeatsHandler() + } + + return nil, nil } // GetBalance is the mock implementation of a handler's GetBalance method func (f *FacadeStub) GetBalance(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) { - return f.GetBalanceCalled(address, options) + if f.GetBalanceCalled != nil { + return f.GetBalanceCalled(address, options) + } + + return nil, api.BlockInfo{}, nil } // GetValueForKey is the mock implementation of a handler's GetValueForKey method @@ -285,7 +295,11 @@ func (f *FacadeStub) GetAllIssuedESDTs(tokenType string) ([]string, error) { // GetAccount - func (f *FacadeStub) GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - return f.GetAccountCalled(address, options) + if f.GetAccountCalled != nil { + return f.GetAccountCalled(address, options) + } + + return api.AccountResponse{}, api.BlockInfo{}, nil } // GetAccounts - @@ -299,72 +313,137 @@ func (f *FacadeStub) GetAccounts(addresses []string, 
options api.AccountQueryOpt // CreateTransaction is mock implementation of a handler's CreateTransaction method func (f *FacadeStub) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) { - return f.CreateTransactionHandler(txArgs) + if f.CreateTransactionHandler != nil { + return f.CreateTransactionHandler(txArgs) + } + + return nil, nil, nil } // GetTransaction is the mock implementation of a handler's GetTransaction method func (f *FacadeStub) GetTransaction(hash string, withResults bool) (*transaction.ApiTransactionResult, error) { - return f.GetTransactionHandler(hash, withResults) + if f.GetTransactionHandler != nil { + return f.GetTransactionHandler(hash, withResults) + } + + return nil, nil } // SimulateTransactionExecution is the mock implementation of a handler's SimulateTransactionExecution method func (f *FacadeStub) SimulateTransactionExecution(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { - return f.SimulateTransactionExecutionHandler(tx) + if f.SimulateTransactionExecutionHandler != nil { + return f.SimulateTransactionExecutionHandler(tx) + } + + return nil, nil } // SendBulkTransactions is the mock implementation of a handler's SendBulkTransactions method func (f *FacadeStub) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { - return f.SendBulkTransactionsHandler(txs) + if f.SendBulkTransactionsHandler != nil { + return f.SendBulkTransactionsHandler(txs) + } + + return 0, nil } // ValidateTransaction - func (f *FacadeStub) ValidateTransaction(tx *transaction.Transaction) error { - return f.ValidateTransactionHandler(tx) + if f.ValidateTransactionHandler != nil { + return f.ValidateTransactionHandler(tx) + } + + return nil } // ValidateTransactionForSimulation - func (f *FacadeStub) ValidateTransactionForSimulation(tx *transaction.Transaction, bypassSignature bool) error { - return f.ValidateTransactionForSimulationHandler(tx, bypassSignature) + if f.ValidateTransactionForSimulationHandler != nil { + return f.ValidateTransactionForSimulationHandler(tx, bypassSignature) + } + + return nil } // ValidatorStatisticsApi is the mock implementation of a handler's ValidatorStatisticsApi method func (f *FacadeStub) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { - return f.ValidatorStatisticsHandler() + if f.ValidatorStatisticsHandler != nil { + return f.ValidatorStatisticsHandler() + } + + return nil, nil +} + +// AuctionListApi is the mock implementation of a handler's AuctionListApi method +func (f *FacadeStub) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + if f.AuctionListHandler != nil { + return f.AuctionListHandler() + } + + return nil, nil } // ExecuteSCQuery is a mock implementation. 
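The two new read-only endpoints added in this changeset (`/node/loaded-keys` and `/validator/auction`, backed by `GetLoadedKeys` and `AuctionListApi` on the facade) can be exercised against a running node. A quick sketch, assuming the REST API listens on localhost:8080 and the routes are enabled in api.toml; the JSON shown is illustrative, shaped after the `loadedKeys`/`auctionList` payloads and the `AuctionListValidatorAPIResponse` fields used in the tests:

```bash
curl -s http://localhost:8080/node/loaded-keys
# => {"data":{"loadedKeys":["pk1","pk2"]},"error":"","code":"successful"}

curl -s http://localhost:8080/validator/auction
# => {"data":{"auctionList":[{"owner":"...","numStakedNodes":4,"totalTopUp":"1234","topUpPerNode":"4321","qualifiedTopUp":"4444"}]},"error":"","code":"successful"}
```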
func (f *FacadeStub) ExecuteSCQuery(query *process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) { - return f.ExecuteSCQueryHandler(query) + if f.ExecuteSCQueryHandler != nil { + return f.ExecuteSCQueryHandler(query) + } + + return nil, api.BlockInfo{}, nil } // StatusMetrics is the mock implementation for the StatusMetrics func (f *FacadeStub) StatusMetrics() external.StatusMetricsHandler { - return f.StatusMetricsHandler() + if f.StatusMetricsHandler != nil { + return f.StatusMetricsHandler() + } + + return nil } // GetTotalStakedValue - func (f *FacadeStub) GetTotalStakedValue() (*api.StakeValues, error) { - return f.GetTotalStakedValueHandler() + if f.GetTotalStakedValueHandler != nil { + return f.GetTotalStakedValueHandler() + } + + return nil, nil } // GetDirectStakedList - func (f *FacadeStub) GetDirectStakedList() ([]*api.DirectStakedValue, error) { - return f.GetDirectStakedListHandler() + if f.GetDirectStakedListHandler != nil { + return f.GetDirectStakedListHandler() + } + + return nil, nil } // GetDelegatorsList - func (f *FacadeStub) GetDelegatorsList() ([]*api.Delegator, error) { - return f.GetDelegatorsListHandler() + if f.GetDelegatorsListHandler != nil { + return f.GetDelegatorsListHandler() + } + + return nil, nil } // ComputeTransactionGasLimit - func (f *FacadeStub) ComputeTransactionGasLimit(tx *transaction.Transaction) (*transaction.CostResponse, error) { - return f.ComputeTransactionGasLimitHandler(tx) + if f.ComputeTransactionGasLimitHandler != nil { + return f.ComputeTransactionGasLimitHandler(tx) + } + + return nil, nil } // NodeConfig - func (f *FacadeStub) NodeConfig() map[string]interface{} { - return f.NodeConfigCalled() + if f.NodeConfigCalled != nil { + return f.NodeConfigCalled() + } + + return nil } // EncodeAddressPubkey - @@ -382,17 +461,29 @@ func (f *FacadeStub) DecodeAddressPubkey(pk string) ([]byte, error) { // GetQueryHandler - func (f *FacadeStub) GetQueryHandler(name string) (debug.QueryHandler, error) { - return f.GetQueryHandlerCalled(name) + if f.GetQueryHandlerCalled != nil { + return f.GetQueryHandlerCalled(name) + } + + return nil, nil } // GetPeerInfo - func (f *FacadeStub) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { - return f.GetPeerInfoCalled(pid) + if f.GetPeerInfoCalled != nil { + return f.GetPeerInfoCalled(pid) + } + + return nil, nil } // GetConnectedPeersRatingsOnMainNetwork - func (f *FacadeStub) GetConnectedPeersRatingsOnMainNetwork() (string, error) { - return f.GetConnectedPeersRatingsOnMainNetworkCalled() + if f.GetConnectedPeersRatingsOnMainNetworkCalled != nil { + return f.GetConnectedPeersRatingsOnMainNetworkCalled() + } + + return "", nil } // GetEpochStartDataAPI - @@ -402,12 +493,20 @@ func (f *FacadeStub) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataA // GetBlockByNonce - func (f *FacadeStub) GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) { - return f.GetBlockByNonceCalled(nonce, options) + if f.GetBlockByNonceCalled != nil { + return f.GetBlockByNonceCalled(nonce, options) + } + + return nil, nil } // GetBlockByHash - func (f *FacadeStub) GetBlockByHash(hash string, options api.BlockQueryOptions) (*api.Block, error) { - return f.GetBlockByHashCalled(hash, options) + if f.GetBlockByHashCalled != nil { + return f.GetBlockByHashCalled(hash, options) + } + + return nil, nil } // GetBlockByRound - @@ -596,6 +695,14 @@ func (f *FacadeStub) GetManagedKeys() []string { return make([]string, 0) } +// GetLoadedKeys - +func (f *FacadeStub) GetLoadedKeys() []string { 
+ if f.GetLoadedKeysCalled != nil { + return f.GetLoadedKeysCalled() + } + return make([]string, 0) +} + // GetEligibleManagedKeys - func (f *FacadeStub) GetEligibleManagedKeys() ([]string, error) { if f.GetEligibleManagedKeysCalled != nil { diff --git a/api/shared/interface.go b/api/shared/interface.go index 9be6e66c7b8..4b775ebdd39 100644 --- a/api/shared/interface.go +++ b/api/shared/interface.go @@ -115,6 +115,7 @@ type FacadeHandler interface { ComputeTransactionGasLimit(tx *transaction.Transaction) (*transaction.CostResponse, error) EncodeAddressPubkey(pk []byte) (string, error) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) ExecuteSCQuery(*process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) DecodeAddressPubkey(pk string) ([]byte, error) RestApiInterface() string @@ -130,6 +131,7 @@ type FacadeHandler interface { IsDataTrieMigrated(address string, options api.AccountQueryOptions) (bool, error) GetManagedKeysCount() int GetManagedKeys() []string + GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) diff --git a/cmd/assessment/main.go b/cmd/assessment/main.go index 8e61205de2b..47642c03faa 100644 --- a/cmd/assessment/main.go +++ b/cmd/assessment/main.go @@ -12,7 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/cmd/assessment/benchmarks" "github.com/multiversx/mx-chain-go/cmd/assessment/benchmarks/factory" - "github.com/multiversx/mx-chain-go/cmd/assessment/hostParameters" + "github.com/multiversx/mx-chain-go/common/hostParameters" logger "github.com/multiversx/mx-chain-logger-go" "github.com/urfave/cli" ) diff --git a/cmd/assessment/testdata/cpucalculate.wasm b/cmd/assessment/testdata/cpucalculate.wasm old mode 100644 new mode 100755 index 1dc0dc30389..8f04b918eaa Binary files a/cmd/assessment/testdata/cpucalculate.wasm and b/cmd/assessment/testdata/cpucalculate.wasm differ diff --git a/cmd/assessment/testdata/storage100.wasm b/cmd/assessment/testdata/storage100.wasm old mode 100644 new mode 100755 index afc590aa0e6..b1b9701c7af Binary files a/cmd/assessment/testdata/storage100.wasm and b/cmd/assessment/testdata/storage100.wasm differ diff --git a/cmd/node/config/api.toml b/cmd/node/config/api.toml index 2c7fb1d7889..a10ec049554 100644 --- a/cmd/node/config/api.toml +++ b/cmd/node/config/api.toml @@ -43,6 +43,9 @@ # /node/managed-keys will return the keys managed by the node { Name = "/managed-keys", Open = true }, + # /node/loaded-keys will return the keys loaded by the node + { Name = "/loaded-keys", Open = true }, + # /node/managed-keys/count will return the number of keys managed by the node { Name = "/managed-keys/count", Open = true }, @@ -170,7 +173,10 @@ [APIPackages.validator] Routes = [ # /validator/statistics will return a list of validators statistics for all validators - { Name = "/statistics", Open = true } + { Name = "/statistics", Open = true }, + + # /validator/auction will return a list of nodes that are in the auction list + { Name = "/auction", Open = true }, ] [APIPackages.vm-values] diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 05edd47443c..7f07d4dd380 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -35,10 +35,13 
@@ # SyncProcessTimeInMillis is the value in milliseconds used when processing blocks while synchronizing blocks SyncProcessTimeInMillis = 12000 - # SetGuardianEpochsDelay represents the delay in epochs between the execution time of the SetGuardian transaction and - # the activation of the configured guardian. - # Make sure that this is greater than the unbonding period! - SetGuardianEpochsDelay = 2 # TODO: for mainnet should be 20, 2 is just for testing + # SetGuardianEpochsDelay represents the delay in epochs between the execution time of the SetGuardian transaction and + # the activation of the configured guardian. + # Make sure that this is greater than the unbonding period! + SetGuardianEpochsDelay = 2 # TODO: for mainnet should be 20, 2 is just for testing + +[HardwareRequirements] + CPUFlags = ["SSE4", "SSE42"] [Versions] DefaultVersion = "default" @@ -489,6 +492,7 @@ [Antiflood] Enabled = true NumConcurrentResolverJobs = 50 + NumConcurrentResolvingTrieNodesJobs = 3 [Antiflood.FastReacting] IntervalInSeconds = 1 ReservedPercent = 20.0 @@ -617,6 +621,7 @@ Type = "json" [EpochStartConfig] + GenesisEpoch = 0 MinRoundsBetweenEpochs = 20 RoundsPerEpoch = 200 # Min and Max ShuffledOutRestartThreshold represents the minimum and maximum duration of an epoch (in percentage) after a node which @@ -626,6 +631,7 @@ MinNumConnectedPeersToStart = 2 MinNumOfPeersToConsiderBlockValid = 2 + ExtraDelayForRequestBlockInfoInMilliseconds = 3000 # ResourceStats, if enabled, will output in a folder called "stats" # resource statistics. For example: number of active go routines, memory allocation, number of GC sweeps, etc. @@ -655,6 +661,7 @@ PeerStatePruningEnabled = true MaxStateTrieLevelInMemory = 5 MaxPeerTrieLevelInMemory = 5 + StateStatisticsEnabled = false [BlockSizeThrottleConfig] MinSizeInBytes = 104857 # 104857 is 10% from 1MB @@ -665,9 +672,11 @@ TimeOutForSCExecutionInMilliseconds = 10000 # 10 seconds = 10000 milliseconds WasmerSIGSEGVPassthrough = false # must be false for release WasmVMVersions = [ - { StartEpoch = 0, Version = "v1.3" }, - { StartEpoch = 1, Version = "v1.4" }, - { StartEpoch = 3, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly + { StartEpoch = 0, Version = "v1.4" }, + { StartEpoch = 1, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly + ] + TransferAndExecuteByUserAddresses = [ # TODO: set real contract addresses for all shards + "erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe3", #shard 0 ] [VirtualMachine.Querying] @@ -675,9 +684,11 @@ TimeOutForSCExecutionInMilliseconds = 10000 # 10 seconds = 10000 milliseconds WasmerSIGSEGVPassthrough = false # must be false for release WasmVMVersions = [ - { StartEpoch = 0, Version = "v1.3" }, - { StartEpoch = 1, Version = "v1.4" }, - { StartEpoch = 3, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly + { StartEpoch = 0, Version = "v1.4" }, + { StartEpoch = 1, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly + ] + TransferAndExecuteByUserAddresses = [ # TODO: set real contract addresses for all shards + "erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe3", ] [VirtualMachine.GasConfig] @@ -935,3 +946,6 @@ # MaxRoundsOfInactivityAccepted defines the number of rounds missed by a main or higher level backup machine before # the current machine will take over and propose/sign blocks. Used in both single-key and multi-key modes. 
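Regarding the new `[HardwareRequirements]` section introduced above: a quick way to see whether a Linux host exposes the listed SSE4 capabilities is to inspect `/proc/cpuinfo`. This is only a sketch — it assumes `SSE4`/`SSE42` correspond to the `sse4_1`/`sse4_2` cpuinfo flags; the authoritative check is the one performed by the node itself:

```bash
for flag in sse4_1 sse4_2; do
  grep -qw "$flag" /proc/cpuinfo && echo "$flag: present" || echo "$flag: MISSING"
done
```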
MaxRoundsOfInactivityAccepted = 3 + +[RelayedTransactionConfig] + MaxTransactionsAllowed = 50 diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 5da22947810..afd36dcb825 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -62,7 +62,7 @@ ESDTEnableEpoch = 1 # GovernanceEnableEpoch represents the epoch when governance is enabled - GovernanceEnableEpoch = 5 + GovernanceEnableEpoch = 1 # GovernanceDisableProposeEnableEpoch represents the epoch when governance disable proposal is enabled GovernanceDisableProposeEnableEpoch = 6 @@ -96,9 +96,6 @@ # ValidatorToDelegationEnableEpoch represents the epoch when the validator-to-delegation feature will be enabled ValidatorToDelegationEnableEpoch = 1 - # WaitingListFixEnableEpoch represents the epoch when the 6 epoch waiting list fix is enabled - WaitingListFixEnableEpoch = 1000000 - # IncrementSCRNonceInMultiTransferEnableEpoch represents the epoch when the fix for preventing the generation of the same SCRs # is enabled. The fix is done by adding an extra increment. IncrementSCRNonceInMultiTransferEnableEpoch = 1 @@ -112,9 +109,6 @@ # ESDTTransferRoleEnableEpoch represents the epoch when esdt transfer role set is enabled ESDTTransferRoleEnableEpoch = 1 - # BuiltInFunctionOnMetaEnableEpoch represents the epoch when built in function processing on metachain is enabled - BuiltInFunctionOnMetaEnableEpoch = 1000000 - # ComputeRewardCheckpointEnableEpoch represents the epoch when compute rewards checkpoint epoch is enabled ComputeRewardCheckpointEnableEpoch = 1 @@ -258,43 +252,89 @@ DeterministicSortOnValidatorsInfoEnableEpoch = 1 # SCProcessorV2EnableEpoch represents the epoch when SC processor V2 will be used - SCProcessorV2EnableEpoch = 3 + SCProcessorV2EnableEpoch = 1 # AutoBalanceDataTriesEnableEpoch represents the epoch when the data tries are automatically balanced by inserting at the hashed key instead of the normal key - AutoBalanceDataTriesEnableEpoch = 3 + AutoBalanceDataTriesEnableEpoch = 1 + + # MigrateDataTrieEnableEpoch represents the epoch when the data tries migration is enabled + MigrateDataTrieEnableEpoch = 1 # KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured - KeepExecOrderOnCreatedSCRsEnableEpoch = 3 + KeepExecOrderOnCreatedSCRsEnableEpoch = 1 # MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation is enabled - MultiClaimOnDelegationEnableEpoch = 3 + MultiClaimOnDelegationEnableEpoch = 1 # ChangeUsernameEnableEpoch represents the epoch when changing username is enabled - ChangeUsernameEnableEpoch = 3 + ChangeUsernameEnableEpoch = 4 # ConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when the consistent tokens values length check is enabled - ConsistentTokensValuesLengthCheckEnableEpoch = 3 + ConsistentTokensValuesLengthCheckEnableEpoch = 1 # FixDelegationChangeOwnerOnAccountEnableEpoch represents the epoch when the fix for the delegation system smart contract is enabled - FixDelegationChangeOwnerOnAccountEnableEpoch = 3 + FixDelegationChangeOwnerOnAccountEnableEpoch = 1 # DynamicGasCostForDataTrieStorageLoadEnableEpoch represents the epoch when dynamic gas cost for data trie storage load will be enabled - DynamicGasCostForDataTrieStorageLoadEnableEpoch = 3 + DynamicGasCostForDataTrieStorageLoadEnableEpoch = 1 # ScToScLogEventEnableEpoch represents the epoch when the sc to sc log event feature is enabled - ScToScLogEventEnableEpoch = 3 + 
ScToScLogEventEnableEpoch = 1 # NFTStopCreateEnableEpoch represents the epoch when NFT stop create feature is enabled - NFTStopCreateEnableEpoch = 3 + NFTStopCreateEnableEpoch = 1 # ChangeOwnerAddressCrossShardThroughSCEnableEpoch represents the epoch when the change owner address built in function will work also through a smart contract call cross shard - ChangeOwnerAddressCrossShardThroughSCEnableEpoch = 3 + ChangeOwnerAddressCrossShardThroughSCEnableEpoch = 1 # FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch represents the epoch when the fix for the remaining gas in the SaveKeyValue builtin function is enabled - FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = 3 + FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = 1 # CurrentRandomnessOnSortingEnableEpoch represents the epoch when the current randomness on sorting is enabled - CurrentRandomnessOnSortingEnableEpoch = 4 + CurrentRandomnessOnSortingEnableEpoch = 1 + + # StakeLimitsEnableEpoch represents the epoch when stake limits on validators are enabled + # Should have the same value as StakingV4Step1EnableEpoch that triggers the automatic unstake operations for the queue nodes + StakeLimitsEnableEpoch = 1 + + # StakingV4Step1EnableEpoch represents the epoch when staking v4 is initialized. This is the epoch in which + # all nodes from staking queue are moved in the auction list + StakingV4Step1EnableEpoch = 1 + + # StakingV4Step2EnableEpoch represents the epoch when staking v4 is enabled. Should have a greater value than StakingV4Step1EnableEpoch. + # From this epoch, all shuffled out nodes are moved to auction nodes. No auction nodes selection is done yet. + StakingV4Step2EnableEpoch = 2 + + # StakingV4Step3EnableEpoch represents the epoch in which selected nodes from auction will be distributed to waiting list + StakingV4Step3EnableEpoch = 3 + + # CleanupAuctionOnLowWaitingListEnableEpoch represents the epoch when duplicated data cleanup from auction list is enabled in the condition of a low waiting list + # Should have the same value as StakingV4Step1EnableEpoch if the low waiting list has not happened, otherwise should have a greater value + CleanupAuctionOnLowWaitingListEnableEpoch = 1 + + # AlwaysMergeContextsInEEIEnableEpoch represents the epoch in which the EEI will always merge the contexts + AlwaysMergeContextsInEEIEnableEpoch = 1 + + # DynamicESDTEnableEpoch represents the epoch when dynamic NFT feature is enabled + DynamicESDTEnableEpoch = 4 + + # EGLDInMultiTransferEnableEpoch represents the epoch when EGLD in multitransfer is enabled + EGLDInMultiTransferEnableEpoch = 4 + + # CryptoOpcodesV2EnableEpoch represents the epoch when BLSMultiSig, Secp256r1 and other opcodes are enabled + CryptoOpcodesV2EnableEpoch = 4 + + # UnjailCleanupEnableEpoch represents the epoch when the cleanup of the unjailed nodes is enabled + UnJailCleanupEnableEpoch = 4 + + # RelayedTransactionsV3EnableEpoch represents the epoch when the relayed transactions V3 will be enabled + RelayedTransactionsV3EnableEpoch = 7 + + # FixRelayedBaseCostEnableEpoch represents the epoch when the fix for relayed base cost will be enabled + FixRelayedBaseCostEnableEpoch = 7 + + # MultiESDTNFTTransferAndExecuteByUserEnableEpoch represents the epoch when enshrined sovereign cross chain opcodes are enabled + MultiESDTNFTTransferAndExecuteByUserEnableEpoch = 9999999 # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers BLSMultiSignerEnableEpoch = [ @@ -304,13 +344,17 @@ # MaxNodesChangeEnableEpoch 
holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ - { EpochEnable = 0, MaxNumNodes = 36, NodesToShufflePerShard = 4 }, - { EpochEnable = 1, MaxNumNodes = 56, NodesToShufflePerShard = 2 } + { EpochEnable = 0, MaxNumNodes = 48, NodesToShufflePerShard = 4 }, # 4 shuffled out keys / shard will not be reached normally + { EpochEnable = 1, MaxNumNodes = 64, NodesToShufflePerShard = 2 }, + # Staking v4 configuration, where: + # - Enable epoch = StakingV4Step3EnableEpoch + # - NodesToShufflePerShard = same as previous entry in MaxNodesChangeEnableEpoch + # - MaxNumNodes = (MaxNumNodesFromPreviousEpochEnable - (numOfShards+1)*NodesToShufflePerShard) + { EpochEnable = 3, MaxNumNodes = 56, NodesToShufflePerShard = 2 }, ] [GasSchedule] # GasScheduleByEpochs holds the configuration for the gas schedule that will be applied from specific epochs GasScheduleByEpochs = [ - { StartEpoch = 0, FileName = "gasScheduleV1.toml" }, - { StartEpoch = 1, FileName = "gasScheduleV7.toml" }, + { StartEpoch = 0, FileName = "gasScheduleV8.toml" }, ] diff --git a/cmd/node/config/enableRounds.toml b/cmd/node/config/enableRounds.toml index e9940cf1b7c..d7be75bb524 100644 --- a/cmd/node/config/enableRounds.toml +++ b/cmd/node/config/enableRounds.toml @@ -10,4 +10,4 @@ [RoundActivations] [RoundActivations.DisableAsyncCallV1] Options = [] - Round = "500" + Round = "100" diff --git a/cmd/node/config/fullArchiveP2P.toml b/cmd/node/config/fullArchiveP2P.toml index 0dd790a83f6..927eb0eb431 100644 --- a/cmd/node/config/fullArchiveP2P.toml +++ b/cmd/node/config/fullArchiveP2P.toml @@ -23,10 +23,11 @@ [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" # TCP listen address PreventPortReuse = false - [Node.ResourceLimiter] - Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". - ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" - ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" + + [Node.ResourceLimiter] + Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". + ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" + ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" # P2P peer discovery section @@ -48,9 +49,12 @@ # RefreshIntervalInSec represents the time in seconds between querying for new peers RefreshIntervalInSec = 10 - # ProtocolID represents the protocol that this node will advertize to other peers - # To connect to other nodes, those nodes should have the same ProtocolID string - ProtocolID = "/erd/kad/1.0.0" + # ProtocolIDs represents the protocols that this node will advertise to other peers + # To connect to other nodes, those nodes should have at least one common protocol string + ProtocolIDs = [ + "/erd/kad/1.0.0", + "mvx-full-archive", + ] # InitialPeerList represents the list of strings of some known nodes that will bootstrap this node # The address will be in a self-describing addressing format. 
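The staking v4 entry in `MaxNodesChangeEnableEpoch` above follows the formula given in its comment; with the values from this config (previous `MaxNumNodes = 64`, `NodesToShufflePerShard = 2`, and assuming 3 shards plus the metachain) the arithmetic works out as follows:

```bash
# MaxNumNodes = MaxNumNodesFromPreviousEpochEnable - (numOfShards + 1) * NodesToShufflePerShard
echo $(( 64 - (3 + 1) * 2 ))   # 56 -> the MaxNumNodes configured for EpochEnable = 3
```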
@@ -71,10 +75,10 @@ [Sharding] # The targeted number of peer connections - TargetPeerCount = 36 + TargetPeerCount = 41 MaxIntraShardValidators = 7 MaxCrossShardValidators = 15 - MaxIntraShardObservers = 2 + MaxIntraShardObservers = 7 MaxCrossShardObservers = 3 MaxSeeders = 2 diff --git a/cmd/node/config/gasSchedules/gasScheduleV1.toml b/cmd/node/config/gasSchedules/gasScheduleV1.toml index 5eeb2259af5..f1e3ef07fbe 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV1.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV1.toml @@ -15,6 +15,11 @@ ESDTNFTAddUri = 500000 ESDTNFTUpdateAttributes = 500000 ESDTNFTMultiTransfer = 1000000 + ESDTModifyRoyalties = 500000 + ESDTModifyCreator = 500000 + ESDTNFTRecreate = 1000000 + ESDTNFTUpdate = 1000000 + ESDTNFTSetNewURIs = 500000 SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 @@ -110,6 +115,7 @@ GetCallbackClosure = 10000 GetCodeMetadata = 10000 IsBuiltinFunction = 10000 + IsReservedFunctionName = 10000 [EthAPICost] UseGas = 100 @@ -209,6 +215,9 @@ UnmarshalCompressedECC = 270000 GenerateKeyECC = 7000000 EncodeDERSig = 10000000 + VerifySecp256r1 = 2000000 + VerifyBLSSignatureShare = 2000000 + VerifyBLSMultiSig = 2000000 [ManagedBufferAPICost] MBufferNew = 2000 diff --git a/cmd/node/config/gasSchedules/gasScheduleV2.toml b/cmd/node/config/gasSchedules/gasScheduleV2.toml index e99b4e7ea59..dd2caa63a84 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV2.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV2.toml @@ -15,6 +15,11 @@ ESDTNFTAddUri = 500000 ESDTNFTUpdateAttributes = 500000 ESDTNFTMultiTransfer = 1000000 + ESDTModifyRoyalties = 500000 + ESDTModifyCreator = 500000 + ESDTNFTRecreate = 1000000 + ESDTNFTUpdate = 1000000 + ESDTNFTSetNewURIs = 500000 SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 @@ -110,6 +115,7 @@ GetCallbackClosure = 10000 GetCodeMetadata = 10000 IsBuiltinFunction = 10000 + IsReservedFunctionName = 10000 [EthAPICost] UseGas = 100 @@ -209,6 +215,9 @@ UnmarshalCompressedECC = 270000 GenerateKeyECC = 7000000 EncodeDERSig = 10000000 + VerifySecp256r1 = 2000000 + VerifyBLSSignatureShare = 2000000 + VerifyBLSMultiSig = 2000000 [ManagedBufferAPICost] MBufferNew = 2000 diff --git a/cmd/node/config/gasSchedules/gasScheduleV3.toml b/cmd/node/config/gasSchedules/gasScheduleV3.toml index b85870c484b..feb00683467 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV3.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV3.toml @@ -15,6 +15,11 @@ ESDTNFTAddUri = 500000 ESDTNFTUpdateAttributes = 500000 ESDTNFTMultiTransfer = 1000000 + ESDTModifyRoyalties = 500000 + ESDTModifyCreator = 500000 + ESDTNFTRecreate = 1000000 + ESDTNFTUpdate = 1000000 + ESDTNFTSetNewURIs = 500000 SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 @@ -110,6 +115,7 @@ GetCallbackClosure = 10000 GetCodeMetadata = 10000 IsBuiltinFunction = 10000 + IsReservedFunctionName = 10000 [EthAPICost] UseGas = 100 @@ -209,6 +215,9 @@ UnmarshalCompressedECC = 270000 GenerateKeyECC = 7000000 EncodeDERSig = 10000000 + VerifySecp256r1 = 2000000 + VerifyBLSSignatureShare = 2000000 + VerifyBLSMultiSig = 2000000 [ManagedBufferAPICost] MBufferNew = 2000 diff --git a/cmd/node/config/gasSchedules/gasScheduleV4.toml b/cmd/node/config/gasSchedules/gasScheduleV4.toml index 01e17c69b12..780ce744624 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV4.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV4.toml @@ -15,6 +15,11 @@ ESDTNFTAddUri = 50000 ESDTNFTUpdateAttributes = 50000 ESDTNFTMultiTransfer = 200000 + ESDTModifyRoyalties = 
500000 + ESDTModifyCreator = 500000 + ESDTNFTRecreate = 1000000 + ESDTNFTUpdate = 1000000 + ESDTNFTSetNewURIs = 500000 SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 @@ -110,6 +115,7 @@ GetCallbackClosure = 10000 GetCodeMetadata = 10000 IsBuiltinFunction = 10000 + IsReservedFunctionName = 10000 [EthAPICost] UseGas = 100 @@ -209,6 +215,9 @@ UnmarshalCompressedECC = 270000 GenerateKeyECC = 7000000 EncodeDERSig = 10000000 + VerifySecp256r1 = 2000000 + VerifyBLSSignatureShare = 2000000 + VerifyBLSMultiSig = 2000000 [ManagedBufferAPICost] MBufferNew = 2000 diff --git a/cmd/node/config/gasSchedules/gasScheduleV5.toml b/cmd/node/config/gasSchedules/gasScheduleV5.toml index 8ec0ac39620..c778b1e81d0 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV5.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV5.toml @@ -15,6 +15,11 @@ ESDTNFTAddUri = 50000 ESDTNFTUpdateAttributes = 50000 ESDTNFTMultiTransfer = 200000 + ESDTModifyRoyalties = 500000 + ESDTModifyCreator = 500000 + ESDTNFTRecreate = 1000000 + ESDTNFTUpdate = 1000000 + ESDTNFTSetNewURIs = 500000 SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 @@ -110,6 +115,7 @@ GetCallbackClosure = 10000 GetCodeMetadata = 10000 IsBuiltinFunction = 10000 + IsReservedFunctionName = 10000 [EthAPICost] UseGas = 100 @@ -209,6 +215,9 @@ UnmarshalCompressedECC = 270000 GenerateKeyECC = 7000000 EncodeDERSig = 10000000 + VerifySecp256r1 = 2000000 + VerifyBLSSignatureShare = 2000000 + VerifyBLSMultiSig = 2000000 [ManagedBufferAPICost] MBufferNew = 2000 diff --git a/cmd/node/config/gasSchedules/gasScheduleV6.toml b/cmd/node/config/gasSchedules/gasScheduleV6.toml index 69e0d1406e4..67b48645646 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV6.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV6.toml @@ -15,6 +15,11 @@ ESDTNFTAddUri = 50000 ESDTNFTUpdateAttributes = 50000 ESDTNFTMultiTransfer = 200000 + ESDTModifyRoyalties = 500000 + ESDTModifyCreator = 500000 + ESDTNFTRecreate = 1000000 + ESDTNFTUpdate = 1000000 + ESDTNFTSetNewURIs = 500000 SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 @@ -110,6 +115,7 @@ GetCallbackClosure = 10000 GetCodeMetadata = 10000 IsBuiltinFunction = 10000 + IsReservedFunctionName = 10000 [EthAPICost] UseGas = 100 @@ -209,6 +215,9 @@ UnmarshalCompressedECC = 270000 GenerateKeyECC = 7000000 EncodeDERSig = 10000000 + VerifySecp256r1 = 2000000 + VerifyBLSSignatureShare = 2000000 + VerifyBLSMultiSig = 2000000 [ManagedBufferAPICost] MBufferNew = 2000 diff --git a/cmd/node/config/gasSchedules/gasScheduleV7.toml b/cmd/node/config/gasSchedules/gasScheduleV7.toml index 9bbf220c356..55204ed88e9 100644 --- a/cmd/node/config/gasSchedules/gasScheduleV7.toml +++ b/cmd/node/config/gasSchedules/gasScheduleV7.toml @@ -16,6 +16,11 @@ ESDTNFTUpdateAttributes = 50000 ESDTNFTMultiTransfer = 200000 MultiESDTNFTTransfer = 200000 # should be the same value with the ESDTNFTMultiTransfer + ESDTModifyRoyalties = 500000 + ESDTModifyCreator = 500000 + ESDTNFTRecreate = 1000000 + ESDTNFTUpdate = 1000000 + ESDTNFTSetNewURIs = 500000 SetGuardian = 250000 GuardAccount = 250000 UnGuardAccount = 250000 @@ -111,6 +116,7 @@ GetCallbackClosure = 10000 GetCodeMetadata = 10000 IsBuiltinFunction = 10000 + IsReservedFunctionName = 10000 [EthAPICost] UseGas = 100 @@ -210,6 +216,9 @@ UnmarshalCompressedECC = 270000 GenerateKeyECC = 7000000 EncodeDERSig = 10000000 + VerifySecp256r1 = 2000000 + VerifyBLSSignatureShare = 2000000 + VerifyBLSMultiSig = 2000000 [ManagedBufferAPICost] MBufferNew = 2000 diff --git 
a/cmd/node/config/gasSchedules/gasScheduleV8.toml b/cmd/node/config/gasSchedules/gasScheduleV8.toml new file mode 100644 index 00000000000..5492900fff6 --- /dev/null +++ b/cmd/node/config/gasSchedules/gasScheduleV8.toml @@ -0,0 +1,840 @@ +[BuiltInCost] + ChangeOwnerAddress = 5000000 + ClaimDeveloperRewards = 5000000 + SaveUserName = 1000000 + SaveKeyValue = 100000 + ESDTTransfer = 200000 + ESDTBurn = 100000 + ESDTLocalMint = 50000 + ESDTLocalBurn = 50000 + ESDTNFTCreate = 150000 + ESDTNFTAddQuantity = 50000 + ESDTNFTBurn = 50000 + ESDTNFTTransfer = 200000 + ESDTNFTChangeCreateOwner = 1000000 + ESDTNFTAddUri = 50000 + ESDTNFTUpdateAttributes = 50000 + ESDTNFTMultiTransfer = 200000 + MultiESDTNFTTransfer = 200000 # should be the same value with the ESDTNFTMultiTransfer + ESDTModifyRoyalties = 500000 + ESDTModifyCreator = 500000 + ESDTNFTRecreate = 1000000 + ESDTNFTUpdate = 1000000 + ESDTNFTSetNewURIs = 500000 + SetGuardian = 250000 + GuardAccount = 250000 + UnGuardAccount = 250000 + TrieLoadPerNode = 100000 + TrieStorePerNode = 50000 + +[MetaChainSystemSCsCost] + Stake = 5000000 + UnStake = 5000000 + UnBond = 5000000 + Claim = 5000000 + Get = 5000000 + ChangeRewardAddress = 5000000 + ChangeValidatorKeys = 5000000 + UnJail = 5000000 + DelegationOps = 1000000 + DelegationMgrOps = 50000000 + ValidatorToDelegation = 500000000 + ESDTIssue = 50000000 + ESDTOperations = 50000000 + Proposal = 50000000 + Vote = 5000000 + DelegateVote = 50000000 + RevokeVote = 50000000 + CloseProposal = 50000000 + ClearProposal = 50000000 + ClaimAccumulatedFees = 1000000 + ChangeConfig = 50000000 + GetAllNodeStates = 20000000 + UnstakeTokens = 5000000 + UnbondTokens = 5000000 + GetActiveFund = 50000 + FixWaitingListSize = 500000000 + +[BaseOperationCost] + StorePerByte = 10000 + ReleasePerByte = 1000 + DataCopyPerByte = 50 + PersistPerByte = 1000 + CompilePerByte = 300 + AoTPreparePerByte = 100 + GetCode = 1000000 + +[BaseOpsAPICost] + GetSCAddress = 1000 + GetOwnerAddress = 5000 + IsSmartContract = 5000 + GetShardOfAddress = 5000 + GetExternalBalance = 7000 + GetBlockHash = 10000 + TransferValue = 100000 + GetArgument = 1000 + GetFunction = 1000 + GetNumArguments = 1000 + StorageStore = 75000 + StorageLoad = 50000 + CachedStorageLoad = 1000 + GetCaller = 1000 + GetCallValue = 1000 + Log = 3750 + Finish = 1 + SignalError = 1 + GetBlockTimeStamp = 10000 + GetGasLeft = 1000 + Int64GetArgument = 1000 + Int64StorageStore = 75000 + Int64StorageLoad = 50000 + Int64Finish = 1000 + GetStateRootHash = 10000 + GetBlockNonce = 10000 + GetBlockEpoch = 10000 + GetBlockRound = 10000 + GetBlockRandomSeed = 10000 + ExecuteOnSameContext = 100000 + ExecuteOnDestContext = 100000 + DelegateExecution = 100000 + AsyncCallStep = 100000 + AsyncCallbackGasLock = 4000000 + ExecuteReadOnly = 160000 + CreateContract = 300000 + GetReturnData = 1000 + GetNumReturnData = 1000 + GetReturnDataSize = 1000 + GetOriginalTxHash = 10000 + CleanReturnData = 1000 + DeleteFromReturnData = 1000 + GetPrevTxHash = 10000 + GetCurrentTxHash = 10000 + CreateAsyncCall = 200000 + SetAsyncCallback = 100000 + SetAsyncGroupCallback = 100000 + SetAsyncContextCallback = 100000 + GetCallbackClosure = 10000 + GetCodeMetadata = 10000 + IsBuiltinFunction = 10000 + IsReservedFunctionName = 10000 + +[EthAPICost] + UseGas = 100 + GetAddress = 100000 + GetExternalBalance = 70000 + GetBlockHash = 100000 + Call = 160000 + CallDataCopy = 200 + GetCallDataSize = 100 + CallCode = 160000 + CallDelegate = 160000 + CallStatic = 160000 + StorageStore = 250000 + StorageLoad = 100000 + 
GetCaller = 100 + GetCallValue = 100 + CodeCopy = 1000 + GetCodeSize = 100 + GetBlockCoinbase = 100 + Create = 320000 + GetBlockDifficulty = 100 + ExternalCodeCopy = 3000 + GetExternalCodeSize = 2500 + GetGasLeft = 100 + GetBlockGasLimit = 100000 + GetTxGasPrice = 1000 + Log = 3750 + GetBlockNumber = 100000 + GetTxOrigin = 100000 + Finish = 1 + Revert = 1 + GetReturnDataSize = 200 + ReturnDataCopy = 500 + SelfDestruct = 5000000 + GetBlockTimeStamp = 100000 + +[BigIntAPICost] + BigIntNew = 2000 + BigIntByteLength = 2000 + BigIntUnsignedByteLength = 2000 + BigIntSignedByteLength = 2000 + BigIntGetBytes = 2000 + BigIntGetUnsignedBytes = 2000 + BigIntGetSignedBytes = 2000 + BigIntSetBytes = 2000 + BigIntSetUnsignedBytes = 2000 + BigIntSetSignedBytes = 2000 + BigIntIsInt64 = 2000 + BigIntGetInt64 = 2000 + BigIntSetInt64 = 2000 + BigIntAdd = 2000 + BigIntSub = 2000 + BigIntMul = 6000 + BigIntSqrt = 6000 + BigIntPow = 6000 + BigIntLog = 6000 + BigIntTDiv = 6000 + BigIntTMod = 6000 + BigIntEDiv = 6000 + BigIntEMod = 6000 + BigIntAbs = 2000 + BigIntNeg = 2000 + BigIntSign = 2000 + BigIntCmp = 2000 + BigIntNot = 2000 + BigIntAnd = 2000 + BigIntOr = 2000 + BigIntXor = 2000 + BigIntShr = 2000 + BigIntShl = 2000 + BigIntFinishUnsigned = 1000 + BigIntFinishSigned = 1000 + BigIntStorageLoadUnsigned = 50000 + BigIntStorageStoreUnsigned = 75000 + BigIntGetArgument = 1000 + BigIntGetUnsignedArgument = 1000 + BigIntGetSignedArgument = 1000 + BigIntGetCallValue = 1000 + BigIntGetExternalBalance = 10000 + CopyPerByteForTooBig = 1000 + +[CryptoAPICost] + SHA256 = 1000000 + Keccak256 = 1000000 + Ripemd160 = 1000000 + VerifyBLS = 5000000 + VerifyEd25519 = 2000000 + VerifySecp256k1 = 2000000 + EllipticCurveNew = 10000 + AddECC = 75000 + DoubleECC = 65000 + IsOnCurveECC = 10000 + ScalarMultECC = 400000 + MarshalECC = 13000 + MarshalCompressedECC = 15000 + UnmarshalECC = 20000 + UnmarshalCompressedECC = 270000 + GenerateKeyECC = 7000000 + EncodeDERSig = 10000000 + VerifySecp256r1 = 2000000 + VerifyBLSSignatureShare = 2000000 + VerifyBLSMultiSig = 2000000 + +[ManagedBufferAPICost] + MBufferNew = 2000 + MBufferNewFromBytes = 2000 + MBufferGetLength = 2000 + MBufferGetBytes = 2000 + MBufferGetByteSlice = 2000 + MBufferCopyByteSlice = 2000 + MBufferSetBytes = 2000 + MBufferAppend = 2000 + MBufferAppendBytes = 2000 + MBufferToBigIntUnsigned = 2000 + MBufferToBigIntSigned = 5000 + MBufferFromBigIntUnsigned = 2000 + MBufferFromBigIntSigned = 5000 + MBufferStorageStore = 75000 + MBufferStorageLoad = 50000 + MBufferGetArgument = 1000 + MBufferFinish = 1000 + MBufferSetRandom = 6000 + MBufferToBigFloat = 2000 + MBufferFromBigFloat = 2000 + +[BigFloatAPICost] + BigFloatNewFromParts = 3000 + BigFloatAdd = 7000 + BigFloatSub = 7000 + BigFloatMul = 7000 + BigFloatDiv = 7000 + BigFloatTruncate = 5000 + BigFloatNeg = 5000 + BigFloatClone = 5000 + BigFloatCmp = 4000 + BigFloatAbs = 5000 + BigFloatSqrt = 7000 + BigFloatPow = 10000 + BigFloatFloor = 5000 + BigFloatCeil = 5000 + BigFloatIsInt = 3000 + BigFloatSetBigInt = 3000 + BigFloatSetInt64 = 1000 + BigFloatGetConst = 1000 + +[WASMOpcodeCost] + Unreachable = 5 + Nop = 5 + Block = 5 + Loop = 5 + If = 5 + Else = 5 + End = 5 + Br = 5 + BrIf = 5 + BrTable = 5 + Return = 5 + Call = 5 + CallIndirect = 5 + Drop = 5 + Select = 5 + TypedSelect = 5 + LocalGet = 5 + LocalSet = 5 + LocalTee = 5 + GlobalGet = 5 + GlobalSet = 5 + I32Load = 5 + I64Load = 5 + F32Load = 6 + F64Load = 6 + I32Load8S = 5 + I32Load8U = 5 + I32Load16S = 5 + I32Load16U = 5 + I64Load8S = 5 + I64Load8U = 5 + I64Load16S = 5 
+ I64Load16U = 5 + I64Load32S = 5 + I64Load32U = 5 + I32Store = 5 + I64Store = 5 + F32Store = 12 + F64Store = 12 + I32Store8 = 5 + I32Store16 = 5 + I64Store8 = 5 + I64Store16 = 5 + I64Store32 = 5 + MemorySize = 5 + MemoryGrow = 1000000 + I32Const = 5 + I64Const = 5 + F32Const = 5 + F64Const = 5 + RefNull = 5 + RefIsNull = 5 + RefFunc = 5 + I32Eqz = 5 + I32Eq = 5 + I32Ne = 5 + I32LtS = 5 + I32LtU = 5 + I32GtS = 5 + I32GtU = 5 + I32LeS = 5 + I32LeU = 5 + I32GeS = 5 + I32GeU = 5 + I64Eqz = 5 + I64Eq = 5 + I64Ne = 5 + I64LtS = 5 + I64LtU = 5 + I64GtS = 5 + I64GtU = 5 + I64LeS = 5 + I64LeU = 5 + I64GeS = 5 + I64GeU = 5 + F32Eq = 6 + F32Ne = 6 + F32Lt = 6 + F32Gt = 6 + F32Le = 6 + F32Ge = 6 + F64Eq = 6 + F64Ne = 6 + F64Lt = 6 + F64Gt = 6 + F64Le = 6 + F64Ge = 6 + I32Clz = 100 + I32Ctz = 100 + I32Popcnt = 100 + I32Add = 5 + I32Sub = 5 + I32Mul = 5 + I32DivS = 18 + I32DivU = 18 + I32RemS = 18 + I32RemU = 18 + I32And = 5 + I32Or = 5 + I32Xor = 5 + I32Shl = 5 + I32ShrS = 5 + I32ShrU = 5 + I32Rotl = 5 + I32Rotr = 5 + I64Clz = 100 + I64Ctz = 100 + I64Popcnt = 100 + I64Add = 5 + I64Sub = 5 + I64Mul = 5 + I64DivS = 18 + I64DivU = 18 + I64RemS = 18 + I64RemU = 18 + I64And = 5 + I64Or = 5 + I64Xor = 5 + I64Shl = 5 + I64ShrS = 5 + I64ShrU = 5 + I64Rotl = 5 + I64Rotr = 5 + F32Abs = 5 + F32Neg = 5 + F32Ceil = 100 + F32Floor = 100 + F32Trunc = 100 + F32Nearest = 100 + F32Sqrt = 100 + F32Add = 5 + F32Sub = 5 + F32Mul = 15 + F32Div = 100 + F32Min = 15 + F32Max = 15 + F32Copysign = 5 + F64Abs = 5 + F64Neg = 5 + F64Ceil = 100 + F64Floor = 100 + F64Trunc = 100 + F64Nearest = 100 + F64Sqrt = 100 + F64Add = 5 + F64Sub = 5 + F64Mul = 15 + F64Div = 100 + F64Min = 15 + F64Max = 15 + F64Copysign = 5 + I32WrapI64 = 9 + I32TruncF32S = 100 + I32TruncF32U = 100 + I32TruncF64S = 100 + I32TruncF64U = 100 + I64ExtendI32S = 9 + I64ExtendI32U = 9 + I64TruncF32S = 100 + I64TruncF32U = 100 + I64TruncF64S = 100 + I64TruncF64U = 100 + F32ConvertI32S = 100 + F32ConvertI32U = 100 + F32ConvertI64S = 100 + F32ConvertI64U = 100 + F32DemoteF64 = 100 + F64ConvertI32S = 100 + F64ConvertI32U = 100 + F64ConvertI64S = 100 + F64ConvertI64U = 100 + F64PromoteF32 = 100 + I32ReinterpretF32 = 100 + I64ReinterpretF64 = 100 + F32ReinterpretI32 = 100 + F64ReinterpretI64 = 100 + I32Extend8S = 9 + I32Extend16S = 9 + I64Extend8S = 9 + I64Extend16S = 9 + I64Extend32S = 9 + I32TruncSatF32S = 100 + I32TruncSatF32U = 100 + I32TruncSatF64S = 100 + I32TruncSatF64U = 100 + I64TruncSatF32S = 100 + I64TruncSatF32U = 100 + I64TruncSatF64S = 100 + I64TruncSatF64U = 100 + MemoryInit = 5 + DataDrop = 5 + MemoryCopy = 5 + MemoryFill = 5 + TableInit = 10 + ElemDrop = 10 + TableCopy = 10 + TableFill = 10 + TableGet = 10 + TableSet = 10 + TableGrow = 10 + TableSize = 10 + AtomicNotify = 1000000 + I32AtomicWait = 1000000 + I64AtomicWait = 1000000 + AtomicFence = 1000000 + I32AtomicLoad = 1000000 + I64AtomicLoad = 1000000 + I32AtomicLoad8U = 1000000 + I32AtomicLoad16U = 1000000 + I64AtomicLoad8U = 1000000 + I64AtomicLoad16U = 1000000 + I64AtomicLoad32U = 1000000 + I32AtomicStore = 1000000 + I64AtomicStore = 1000000 + I32AtomicStore8 = 1000000 + I32AtomicStore16 = 1000000 + I64AtomicStore8 = 1000000 + I64AtomicStore16 = 1000000 + I64AtomicStore32 = 1000000 + I32AtomicRmwAdd = 1000000 + I64AtomicRmwAdd = 1000000 + I32AtomicRmw8AddU = 1000000 + I32AtomicRmw16AddU = 1000000 + I64AtomicRmw8AddU = 1000000 + I64AtomicRmw16AddU = 1000000 + I64AtomicRmw32AddU = 1000000 + I32AtomicRmwSub = 1000000 + I64AtomicRmwSub = 1000000 + I32AtomicRmw8SubU = 1000000 + I32AtomicRmw16SubU = 
1000000 + I64AtomicRmw8SubU = 1000000 + I64AtomicRmw16SubU = 1000000 + I64AtomicRmw32SubU = 1000000 + I32AtomicRmwAnd = 1000000 + I64AtomicRmwAnd = 1000000 + I32AtomicRmw8AndU = 1000000 + I32AtomicRmw16AndU = 1000000 + I64AtomicRmw8AndU = 1000000 + I64AtomicRmw16AndU = 1000000 + I64AtomicRmw32AndU = 1000000 + I32AtomicRmwOr = 1000000 + I64AtomicRmwOr = 1000000 + I32AtomicRmw8OrU = 1000000 + I32AtomicRmw16OrU = 1000000 + I64AtomicRmw8OrU = 1000000 + I64AtomicRmw16OrU = 1000000 + I64AtomicRmw32OrU = 1000000 + I32AtomicRmwXor = 1000000 + I64AtomicRmwXor = 1000000 + I32AtomicRmw8XorU = 1000000 + I32AtomicRmw16XorU = 1000000 + I64AtomicRmw8XorU = 1000000 + I64AtomicRmw16XorU = 1000000 + I64AtomicRmw32XorU = 1000000 + I32AtomicRmwXchg = 1000000 + I64AtomicRmwXchg = 1000000 + I32AtomicRmw8XchgU = 1000000 + I32AtomicRmw16XchgU = 1000000 + I64AtomicRmw8XchgU = 1000000 + I64AtomicRmw16XchgU = 1000000 + I64AtomicRmw32XchgU = 1000000 + I32AtomicRmwCmpxchg = 1000000 + I64AtomicRmwCmpxchg = 1000000 + I32AtomicRmw8CmpxchgU = 1000000 + I32AtomicRmw16CmpxchgU = 1000000 + I64AtomicRmw8CmpxchgU = 1000000 + I64AtomicRmw16CmpxchgU = 1000000 + I64AtomicRmw32CmpxchgU = 1000000 + V128Load = 1000000 + V128Store = 1000000 + V128Const = 1000000 + I8x16Splat = 1000000 + I8x16ExtractLaneS = 1000000 + I8x16ExtractLaneU = 1000000 + I8x16ReplaceLane = 1000000 + I16x8Splat = 1000000 + I16x8ExtractLaneS = 1000000 + I16x8ExtractLaneU = 1000000 + I16x8ReplaceLane = 1000000 + I32x4Splat = 1000000 + I32x4ExtractLane = 1000000 + I32x4ReplaceLane = 1000000 + I64x2Splat = 1000000 + I64x2ExtractLane = 1000000 + I64x2ReplaceLane = 1000000 + F32x4Splat = 1000000 + F32x4ExtractLane = 1000000 + F32x4ReplaceLane = 1000000 + F64x2Splat = 1000000 + F64x2ExtractLane = 1000000 + F64x2ReplaceLane = 1000000 + I8x16Eq = 1000000 + I8x16Ne = 1000000 + I8x16LtS = 1000000 + I8x16LtU = 1000000 + I8x16GtS = 1000000 + I8x16GtU = 1000000 + I8x16LeS = 1000000 + I8x16LeU = 1000000 + I8x16GeS = 1000000 + I8x16GeU = 1000000 + I16x8Eq = 1000000 + I16x8Ne = 1000000 + I16x8LtS = 1000000 + I16x8LtU = 1000000 + I16x8GtS = 1000000 + I16x8GtU = 1000000 + I16x8LeS = 1000000 + I16x8LeU = 1000000 + I16x8GeS = 1000000 + I16x8GeU = 1000000 + I32x4Eq = 1000000 + I32x4Ne = 1000000 + I32x4LtS = 1000000 + I32x4LtU = 1000000 + I32x4GtS = 1000000 + I32x4GtU = 1000000 + I32x4LeS = 1000000 + I32x4LeU = 1000000 + I32x4GeS = 1000000 + I32x4GeU = 1000000 + F32x4Eq = 1000000 + F32x4Ne = 1000000 + F32x4Lt = 1000000 + F32x4Gt = 1000000 + F32x4Le = 1000000 + F32x4Ge = 1000000 + F64x2Eq = 1000000 + F64x2Ne = 1000000 + F64x2Lt = 1000000 + F64x2Gt = 1000000 + F64x2Le = 1000000 + F64x2Ge = 1000000 + V128Not = 1000000 + V128And = 1000000 + V128AndNot = 1000000 + V128Or = 1000000 + V128Xor = 1000000 + V128Bitselect = 1000000 + I8x16Neg = 1000000 + I8x16AnyTrue = 1000000 + I8x16AllTrue = 1000000 + I8x16Shl = 1000000 + I8x16ShrS = 1000000 + I8x16ShrU = 1000000 + I8x16Add = 1000000 + I8x16AddSaturateS = 1000000 + I8x16AddSaturateU = 1000000 + I8x16Sub = 1000000 + I8x16SubSaturateS = 1000000 + I8x16SubSaturateU = 1000000 + I8x16MinS = 1000000 + I8x16MinU = 1000000 + I8x16MaxS = 1000000 + I8x16MaxU = 1000000 + I8x16Mul = 1000000 + I16x8Neg = 1000000 + I16x8AnyTrue = 1000000 + I16x8AllTrue = 1000000 + I16x8Shl = 1000000 + I16x8ShrS = 1000000 + I16x8ShrU = 1000000 + I16x8Add = 1000000 + I16x8AddSaturateS = 1000000 + I16x8AddSaturateU = 1000000 + I16x8Sub = 1000000 + I16x8SubSaturateS = 1000000 + I16x8SubSaturateU = 1000000 + I16x8Mul = 1000000 + I16x8MinS = 1000000 + I16x8MinU = 1000000 + 
I16x8MaxS = 1000000 + I16x8MaxU = 1000000 + I32x4Neg = 1000000 + I32x4AnyTrue = 1000000 + I32x4AllTrue = 1000000 + I32x4Shl = 1000000 + I32x4ShrS = 1000000 + I32x4ShrU = 1000000 + I32x4Add = 1000000 + I32x4Sub = 1000000 + I32x4Mul = 1000000 + I32x4MinS = 1000000 + I32x4MinU = 1000000 + I32x4MaxS = 1000000 + I32x4MaxU = 1000000 + I64x2Neg = 1000000 + I64x2AnyTrue = 1000000 + I64x2AllTrue = 1000000 + I64x2Shl = 1000000 + I64x2ShrS = 1000000 + I64x2ShrU = 1000000 + I64x2Add = 1000000 + I64x2Sub = 1000000 + I64x2Mul = 1000000 + F32x4Abs = 1000000 + F32x4Neg = 1000000 + F32x4Sqrt = 1000000 + F32x4Add = 1000000 + F32x4Sub = 1000000 + F32x4Mul = 1000000 + F32x4Div = 1000000 + F32x4Min = 1000000 + F32x4Max = 1000000 + F64x2Abs = 1000000 + F64x2Neg = 1000000 + F64x2Sqrt = 1000000 + F64x2Add = 1000000 + F64x2Sub = 1000000 + F64x2Mul = 1000000 + F64x2Div = 1000000 + F64x2Min = 1000000 + F64x2Max = 1000000 + I32x4TruncSatF32x4S = 1000000 + I32x4TruncSatF32x4U = 1000000 + I64x2TruncSatF64x2S = 1000000 + I64x2TruncSatF64x2U = 1000000 + F32x4ConvertI32x4S = 1000000 + F32x4ConvertI32x4U = 1000000 + F64x2ConvertI64x2S = 1000000 + F64x2ConvertI64x2U = 1000000 + V8x16Swizzle = 1000000 + V8x16Shuffle = 1000000 + V8x16LoadSplat = 1000000 + V16x8LoadSplat = 1000000 + V32x4LoadSplat = 1000000 + V64x2LoadSplat = 1000000 + I8x16NarrowI16x8S = 1000000 + I8x16NarrowI16x8U = 1000000 + I16x8NarrowI32x4S = 1000000 + I16x8NarrowI32x4U = 1000000 + I16x8WidenLowI8x16S = 1000000 + I16x8WidenHighI8x16S = 1000000 + I16x8WidenLowI8x16U = 1000000 + I16x8WidenHighI8x16U = 1000000 + I32x4WidenLowI16x8S = 1000000 + I32x4WidenHighI16x8S = 1000000 + I32x4WidenLowI16x8U = 1000000 + I32x4WidenHighI16x8U = 1000000 + I16x8Load8x8S = 1000000 + I16x8Load8x8U = 1000000 + I32x4Load16x4S = 1000000 + I32x4Load16x4U = 1000000 + I64x2Load32x2S = 1000000 + I64x2Load32x2U = 1000000 + I8x16RoundingAverageU = 1000000 + I16x8RoundingAverageU = 1000000 + LocalAllocate = 5 + LocalsUnmetered = 100 + MaxMemoryGrowDelta = 1 + MaxMemoryGrow = 10 + Catch = 10 + CatchAll = 10 + Delegate = 10 + Rethrow = 10 + ReturnCall = 10 + ReturnCallIndirect = 10 + Throw = 10 + Try = 10 + Unwind = 10 + F32x4Ceil = 1000000 + F32x4DemoteF64x2Zero = 1000000 + F32x4Floor = 1000000 + F32x4Nearest = 1000000 + F32x4PMax = 1000000 + F32x4PMin = 1000000 + F32x4Trunc = 1000000 + F64x2Ceil = 1000000 + F64x2ConvertLowI32x4S = 1000000 + F64x2ConvertLowI32x4U = 1000000 + F64x2Floor = 1000000 + F64x2Nearest = 1000000 + F64x2PMax = 1000000 + F64x2PMin = 1000000 + F64x2PromoteLowF32x4 = 1000000 + F64x2Trunc = 1000000 + I16x8Abs = 1000000 + I16x8AddSatS = 1000000 + I16x8AddSatU = 1000000 + I16x8Bitmask = 1000000 + I16x8ExtAddPairwiseI8x16S = 1000000 + I16x8ExtAddPairwiseI8x16U = 1000000 + I16x8ExtMulHighI8x16S = 1000000 + I16x8ExtMulHighI8x16U = 1000000 + I16x8ExtMulLowI8x16S = 1000000 + I16x8ExtMulLowI8x16U = 1000000 + I16x8ExtendHighI8x16S = 1000000 + I16x8ExtendHighI8x16U = 1000000 + I16x8ExtendLowI8x16S = 1000000 + I16x8ExtendLowI8x16U = 1000000 + I16x8Q15MulrSatS = 1000000 + I16x8SubSatS = 1000000 + I16x8SubSatU = 1000000 + I32x4Abs = 1000000 + I32x4Bitmask = 1000000 + I32x4DotI16x8S = 1000000 + I32x4ExtAddPairwiseI16x8S = 1000000 + I32x4ExtAddPairwiseI16x8U = 1000000 + I32x4ExtMulHighI16x8S = 1000000 + I32x4ExtMulHighI16x8U = 1000000 + I32x4ExtMulLowI16x8S = 1000000 + I32x4ExtMulLowI16x8U = 1000000 + I32x4ExtendHighI16x8S = 1000000 + I32x4ExtendHighI16x8U = 1000000 + I32x4ExtendLowI16x8S = 1000000 + I32x4ExtendLowI16x8U = 1000000 + I32x4TruncSatF64x2SZero = 1000000 + 
I32x4TruncSatF64x2UZero = 1000000 + I64x2Abs = 1000000 + I64x2Bitmask = 1000000 + I64x2Eq = 1000000 + I64x2ExtMulHighI32x4S = 1000000 + I64x2ExtMulHighI32x4U = 1000000 + I64x2ExtMulLowI32x4S = 1000000 + I64x2ExtMulLowI32x4U = 1000000 + I64x2ExtendHighI32x4S = 1000000 + I64x2ExtendHighI32x4U = 1000000 + I64x2ExtendLowI32x4S = 1000000 + I64x2ExtendLowI32x4U = 1000000 + I64x2GeS = 1000000 + I64x2GtS = 1000000 + I64x2LeS = 1000000 + I64x2LtS = 1000000 + I64x2Ne = 1000000 + I8x16Abs = 1000000 + I8x16AddSatS = 1000000 + I8x16AddSatU = 1000000 + I8x16Bitmask = 1000000 + I8x16Popcnt = 1000000 + I8x16Shuffle = 1000000 + I8x16SubSatS = 1000000 + I8x16SubSatU = 1000000 + I8x16Swizzle = 1000000 + MemoryAtomicNotify = 1000000 + MemoryAtomicWait32 = 1000000 + MemoryAtomicWait64 = 1000000 + V128AnyTrue = 1000000 + V128Load16Lane = 1000000 + V128Load16Splat = 1000000 + V128Load16x4S = 1000000 + V128Load16x4U = 1000000 + V128Load32Lane = 1000000 + V128Load32Splat = 1000000 + V128Load32Zero = 1000000 + V128Load32x2S = 1000000 + V128Load32x2U = 1000000 + V128Load64Lane = 1000000 + V128Load64Splat = 1000000 + V128Load64Zero = 1000000 + V128Load8Lane = 1000000 + V128Load8Splat = 1000000 + V128Load8x8S = 1000000 + V128Load8x8U = 1000000 + V128Store16Lane = 1000000 + V128Store32Lane = 1000000 + V128Store64Lane = 1000000 + V128Store8Lane = 1000000 + +[MaxPerTransaction] + MaxBuiltInCallsPerTx = 100 + MaxNumberOfTransfersPerTx = 250 + MaxNumberOfTrieReadsPerTx = 1500 + +# Quadratic, Linear and Constant are the coefficients for a quadratic func. Separate variables are used for the +# sign of each coefficient, 0 meaning positive and 1 meaning negative +# The current values for the coefficients were computed based on benchmarking. +# For the given coefficients, the minimum of the function must not be lower than MinimumGasCost +[DynamicStorageLoad] + QuadraticCoefficient = 688 + SignOfQuadratic = 0 + LinearCoefficient = 31858 + SignOfLinear = 0 + ConstantCoefficient = 15287 + SignOfConstant = 0 + MinimumGasCost = 10000 diff --git a/cmd/node/config/genesis.json b/cmd/node/config/genesis.json index 10cc1e97d95..27f74229b85 100644 --- a/cmd/node/config/genesis.json +++ b/cmd/node/config/genesis.json @@ -1,92 +1,497 @@ [ { - "address": "erd1ulhw20j7jvgfgak5p05kv667k5k9f320sgef5ayxkt9784ql0zssrzyhjp", - "supply": "2222222222222222222222224", - "balance": "2219722222222222222222224", + "info": "delegator1 for legacy delegation", + "address": "erd1z48u9l275l2uy4augfytpp2355wvdnc4gwc9ms9gdhdqru3fz9eq5wqe3e", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd17c4fs6mz2aa2hcvva2jfxdsrdknu4220496jmswer9njznt22eds0rxlr4", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator2 for legacy delegation", + "address": "erd1qm5erxm0va3hcw9jfxtp2gl3u9q9p4k242pk9xx3vezefkgj2vhs0wk8cx", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd10d2gufxesrp8g409tzxljlaefhs0rsgjle3l7nq38de59txxt8csj54cd3", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator3 for legacy delegation", + 
"address": "erd1rqwt9vpfn072tahtcvup2dxz4nvqfs3n5p9eh0jnypkppxmdheaqpcqfzz", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd1e0vueugj66l5cgrz83se0a74c3hst7u4w55t3usfa3at8yhfq94qtajf2c", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator4 for legacy delegation", + "address": "erd17uygg7qq05mjhectgymj6fwq59ysr4p92cy0yz3jrxxj6253p40sj77wr6", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd1fn9faxsh6felld6c2vd82par6nzshkj609550qu3dngh8faxjz5syukjcq", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator5 for legacy delegation", + "address": "erd1qslr87nj5gkv2396js3fa2za5kqgwugqnz4j4qqh22mxpnse2lws8srsq6", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd12ymx62jlp0dez40slu22dxmese5fl0rwrtqzlnff844rtltnlpdse9ecsm", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator6 for legacy delegation", + "address": "erd17pjlqg55c6v3fjvpqec8peefk74g8neygr84ymw7cqzudmzaw7lqnln7sz", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd1qsrfugd567kv68sysp455cshqr30257c8jnuq2q7zct943w82feszr8n32", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator7 for legacy delegation", + "address": "erd19ztfuew6ejrwq5mpax4xztjwh5j63u9vge4dum9vlyy7hg3pc86qgmt6nm", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd153a3wkfng4cupvkd86k07nl0acq548s72xr3yvpjut6u6fnpzads9zyq37", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator8 for legacy delegation", + "address": "erd1age5t5qfrke4vm47vaq9a4yewllh6227qm4fcy3rc7g5ktmzyatsgf4wcw", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd1yajssshtsc75x87cxvylnwu4r9dv3c2tegufrd07fjmw72krlq9spmw32d", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator9 for legacy delegation", + "address": "erd1jt0vv29trqs3nddzkxsf950xx0t5uvyncmuamwryneh9wsee5wpsgue96d", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": 
"erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" + } + }, + { + "info": "delegator10 for legacy delegation", + "address": "erd1c83rmk3n8ys4g9dkg3q70thx3v787qtpfmk23epu4xsadpyd3dnsejf2r7", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", + "stakingvalue": "0", + "delegation": { + "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", + "value": "1000000000000000000000" + } + }, + { + "info": "wallet1 2500*8 staked + 10000 initial balance", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "supply": "30000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "20000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet2 2500*6 staked + 10000 initial balance", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "supply": "25000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "15000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet3 2500*4 staked + 10000 initial balance", + "address": "erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35", + "supply": "20000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "10000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet4 2500*4 staked + 10000 initial balance", + "address": "erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84", + "supply": "20000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "10000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet5 2500*3 staked + 10000 initial balance", + "address": "erd1dzjes5c6a8ru45clgla3q0k3ezm06svefjz7vzs8pjfnrqa8tcasl4j8hs", + "supply": "17500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "7500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet6 2500*3 staked + 10000 initial balance", + "address": "erd14gg3v6j4505ucx7t2wtl98tgupmyp748aq92jefmp5ha6e3pccgq9clwe9", + "supply": "17500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "7500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet7 2500*2 staked + 10000 initial balance", + "address": "erd1xdfc44mk4ut5cv6l3mq0py6h88cty9ykacskm8xv3tvrp893kmxqppcefg", + "supply": "15000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "5000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet8 2500*2 staked + 10000 initial balance", + "address": "erd1997jfwzrum4rrk59ar5supcyge9rpa73xgv2p45h3unt880v399svt8c9g", + "supply": "15000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "5000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet9 2500 staked + 10000 initial balance", + "address": "erd1e9cg9ys8fh77n9eaxpg47sxaes4fe9g2nvy6a65qpxykcx8grg9sv45lss", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet10 2500 staked + 10000 initial balance", + "address": 
"erd1xdrltsygywhmtxzsmrgjlsxsxrf4y2ayv0z50y666dgsp66trxwqzajk96", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet11 2500 staked + 10000 initial balance", + "address": "erd1lytewufjflpwl6gtf0faazjr59nd2fhfwlk7ew72hkpgdkmunl8qfrpywg", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet12 2500 staked + 10000 initial balance", + "address": "erd1s8tqztm4u4gw23489lps97qxe8vck8eln3a424y9c6yujsc96nas0l968d", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet13 2500 staked + 10000 initial balance", + "address": "erd1p7p0f3n8dxtj08hsp9hccqg932pd4f94rq3adg6g55etx8g4z8tsmg5e0g", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet14 2500 staked + 10000 initial balance", + "address": "erd1uyeel03ea837dphrx2ak77hdvlhjdcqdwgyg6k99gqn602ymsn7qptmedj", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet15 2500 staked + 10000 initial balance", + "address": "erd1ftyzkdhl7rl782mrzrdc2jck3egydp0ydzhcjm9gc8s2jym5egrqadl4h6", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet16 2500*3 staked + 10000 initial balance", + "address": "erd1rsl2sj5g87ltfq0hvrmgm35mlg4lzfs29p4gzxh0lh4vj2e8ykuqh69lha", + "supply": "17500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "7500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet17 2500*2 staked + 10000 initial balance", + "address": "erd19yrjty2l4ytl6d3jynp5mqfekq4uf2x93akz60w7l3cp6qzny3psnfyerw", + "supply": "15000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "5000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet18 2500 staked + 10000 initial balance", + "address": "erd148lq42zdzz34y0yr8avldsy7gw0rmuvj4lmstzug77v08z3q0ncszfk8w9", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet19 2500 staked + 10000 initial balance", + "address": "erd1k2v4h3805gnxf78c22g7lfe4pgq2lmr4ezmkk2rqkej6yjd7g5ssu88fme", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet20 2500 staked + 10000 initial balance", + "address": "erd1nzjyj2ykpway04pczl42fgrlza2f0eaf97fxgnuuw39vyee36xlqccc3qz", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet21 2500 staked + 10000 initial balance", + 
"address": "erd1yp0nvml5c45us3qzreqxkjxaakxn744t3gdva9s8xndcakzawutstepmm5", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet22 2500 staked + 10000 initial balance", + "address": "erd1qyg80tr4rd65ur3hedm9h4yv3fcwmm6vnyrypnm972nd80889hxqdfgwrc", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet23 2500 staked + 10000 initial balance", + "address": "erd14x6d48q59zjh5p909fyw7e46czftgdawyf734cnmgk5e63ghrvvsqp254t", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet24 2500 staked + 10000 initial balance", + "address": "erd1wyxylus33e476h5kta7e0caeqvgvcgrxh0az33e7szya6g7mh2ws0n27sa", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet25 2500 staked + 10000 initial balance", + "address": "erd1v3ylw7t6vzjzs06xjf6ccmf576ud38g2ws45tjkjg48s38jefpzqlwms9w", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet26 2500 staked + 10000 initial balance", + "address": "erd1twel4azu6uptw878y063p93mjr84y5m4kpsww2aeqj4pg5jeplgst04rhg", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet27 2500 staked + 10000 initial balance", + "address": "erd1q2se75ucl9as9j7e48v00jrnj6hvtk5vqxa4a3ag5729vctsdkasm20cyc", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet28 2500 staked + 10000 initial balance", + "address": "erd18cc6cm35xhv7kzwsm79l4ma6jpz3ee5l0yjxuc66kh6rcgtawtuq6lzp9f", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet29 2500 staked + 10000 initial balance", + "address": "erd1psux99h4jljyt3nkw8pruv3spw5r0unqe4wk8837mm9my88gl28qj6mml5", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet30 2500 staked + 10000 initial balance", + "address": "erd1vgm89ngmv2ghzsyq8xjtt45crekkxnhsq30yxzlq86uc3ra3r57qa3mw2p", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet31 2500 staked + 10000 initial balance", + "address": "erd1k767vmmn8vg8xvuny32ppwr4dxrlgmpykn0u7nm92evlag3wkukqdgsf5u", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet32 2500 staked + 10000 initial balance", + 
"address": "erd1hwe8lskmzsdpuy3f6hldamvn0zrhzldec8m4tt8hupq58d7gyrequy8wsp", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet33 2500 staked + 10000 initial balance", + "address": "erd125eyrjk99zadr04gm9z2p4nckmnegexs5nyk7ek85rut2665t75sql3w88", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet34 no staking, initial funds - 10 million EGLD", + "address": "erd17ndrqg38lqf2zjgeqvle90rsn9ejrd9upx8evkyvh8e0m5xlph5scv9l6n", + "supply": "10000000000000000000000000", + "balance": "10000000000000000000000000", + "stakingvalue": "0", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet35 no staking, initial funds - 9509990 EGLD", + "address": "erd1zed89c8226rs7f59zh2xea39qk9ym9tsmt4s0sg2uw7u9nvtzt3q8fdj2e", + "supply": "9509990000000000000000000", + "balance": "9509990000000000000000000", + "stakingvalue": "0", + "delegation": { + "address": "", + "value": "0" } } -] \ No newline at end of file +] diff --git a/cmd/node/config/genesisContracts/dns.wasm b/cmd/node/config/genesisContracts/dns.wasm index ea613050171..ce692a1260b 100644 Binary files a/cmd/node/config/genesisContracts/dns.wasm and b/cmd/node/config/genesisContracts/dns.wasm differ diff --git a/cmd/node/config/genesisSmartContracts.json b/cmd/node/config/genesisSmartContracts.json index f102c18d489..198798c36fe 100644 --- a/cmd/node/config/genesisSmartContracts.json +++ b/cmd/node/config/genesisSmartContracts.json @@ -11,7 +11,7 @@ "owner": "erd188anxz35atlef7cucszypmvx88lhz4m7a7t7lhcwt6sfphpsqlkswfhcx2", "filename": "./config/genesisContracts/dns.wasm", "vm-type": "0500", - "init-parameters": "056bc75e2d63100000", + "init-parameters": "00", "type": "dns", "version": "0.2.*" } diff --git a/cmd/node/config/nodesSetup.json b/cmd/node/config/nodesSetup.json index 239fd9a52f6..741d9009ad8 100644 --- a/cmd/node/config/nodesSetup.json +++ b/cmd/node/config/nodesSetup.json @@ -1,48 +1,395 @@ { "startTime": 0, - "roundDuration": 4000, - "consensusGroupSize": 3, - "minNodesPerShard": 3, - "metaChainConsensusGroupSize": 3, - "metaChainMinNodes": 3, - "hysteresis": 0, + "roundDuration": 6000, + "consensusGroupSize": 7, + "minNodesPerShard": 10, + "metaChainConsensusGroupSize": 10, + "metaChainMinNodes": 10, + "hysteresis": 0.2, "adaptivity": false, "initialNodes": [ { - "pubkey": "cbc8c9a6a8d9c874e89eb9366139368ae728bd3eda43f173756537877ba6bca87e01a97b815c9f691df73faa16f66b15603056540aa7252d73fecf05d24cd36b44332a88386788fbdb59d04502e8ecb0132d8ebd3d875be4c83e8b87c55eb901", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - legacy delegation", + "pubkey": "309198fd7b6bccb1b6279ea2e5c5f2a33b9f6fe5f23180778d8721710344989b07d10dd1bd19307b3cd06eab9d1358062511610ccdad681ae1165016256cc1fdc0fed5041a5c29d7773b2994022bf2dc9efb937b3bb7cc9d72670448fad7d091", + "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", + "initialRating": 5000001 }, { - "pubkey": "ef9522d654bc08ebf2725468f41a693aa7f3cf1cb93922cff1c8c81fba78274016010916f4a7e5b0855c430a724a2d0b3acd1fe8e61e37273a17d58faa8c0d3ef6b883a33ec648950469a1e9757b978d9ae662a019068a401cff56eea059fd08", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 
- legacy delegation", + "pubkey": "cd8cc9d70a8f78df0922470f1ebee727f57a70fb0571c0b512475c8c7d3ce1d9b70dd28e6582c038b18d05f8fbd6ac0a167a38614c40353b32ef47896d13c45bde57e86d87dd6e6f73420db93aeb79a1fd8f4e297f70478685a38ed73e49598f", + "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", + "initialRating": 5000001 }, { - "pubkey": "e91ab494cedd4da346f47aaa1a3e792bea24fb9f6cc40d3546bc4ca36749b8bfb0164e40dbad2195a76ee0fd7fb7da075ecbf1b35a2ac20638d53ea5520644f8c16952225c48304bb202867e2d71d396bff5a5971f345bcfe32c7b6b0ca34c84", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - legacy delegation", + "pubkey": "e441b8a7d35545f7a02f0d57d144c89810d67ecb9abb5804d1bcfd8b270dc30978c56fbf9618e9a6621ca4e6c545c90807f7fe5edfd34ccab83db7bc0bd68536bb65403503d213c9763ec2137d80081579bb1b327f22c677bdb19891f4aae980", + "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", + "initialRating": 5000001 }, { - "pubkey": "8f8bf2e6ad1566cd06ba968b319d264b8ce4f8700032a88556c2ecc3992017654d69d9661ad67b12c8e49289a2925a0c3ab3c161a22c16e772a4fe8a84b273b7ac7c00d9da8fa90a9bb710961faa6e0e2e092f383f2fc365f1cda35d803f0901", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - legacy delegation", + "pubkey": "72d8341713a271d510c7dfd02455ef86e9af4c66e06ac28fbb4c4e8df1e944e685bae2bee2af4101c19922b64e44e40b5d4905755594a719c1d50dc210515495d0de9b3a1d4ed42fd45a973353fe2c2548c45bb2157d8bf68e0937cc20fe1011", + "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", + "initialRating": 5000001 }, { - "pubkey": "aa930dc117738baead60088e9fd53ebc3157ad219f6a11ad4ee662eedb406baad013160ec1083fa68bf25b4ce7503e00e0e6dfbb4e405107a350d88feda2d01ae5b7b27a068d6accc980e498b36fc9ab1df4f3bcffec9f1611e20dea05b55a92", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "796d2f56ee9fa8f4ff9fc7735bbb4d644be8dd044f7f965362aed3d562e00b11f730b2fe21eb9c9fd100b68a5d3dbf07bae63d25739f1304ab638330f0e8c207a76de648e2523ad1693f4ffe9def8a1d5aca2c6c1c14e1fcc20089db069d1a0e", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 }, { - "pubkey": "70cf21360c0d276bb49af3a76e1bc193f05f688c0f8029a895742dbc4713fe2c36b8a90dd9455b308c3fbf5e3a3ea115ec1a6c353af028d104402a0f1813d6178740b62911470d75eab62ae630d7f1181c68fc1e966967749dc98eab35c03f0c", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "4cc0bfcdf4515927e3ae7890197bfd7c2e3c6f80ff7570fc6313d095e0d4e518ecc81db7087fefb8af166723a32ded1324f4ee00a6a97d206a024fd95ab60f7fe18c896c829ac43782c56d922415fb4ddbc0384936e3f860feb0666da43bcd19", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 }, { - "pubkey": "ea4a05326f44746beff6302f4a0452ad789113186ede483a577294d3bdf638a0742a57d453edbc61db32e04e101b7c021a1480a8d4989856a83b375d66fe61df64effc0cb68a18bebbc99b7e12ebc3084c17599b83bba33c435b8953974d2484", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "3449c9d672472ea52e83be4a8d6ce55785044233d848ac0a9a4b7fc22bf5b1bf5776f95f4b459f4e4cd3ba1d20b46a197c5a55ec00fa8d795d35f2232a6fae360129d186e3a0c219d42d8357de2823c8566da7841c2739b30d1581c4a38ec80e", + "address": 
"erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 }, { - "pubkey": "86b5dcfb9372b0865f0531782827bed66cb7313ab0924c052d3701c59d3c686748e757bb9e20ad1924d3531dc1eb1206f89d00791e79ea994e0a8b5d4ef92335f0d83f09cc358b718b103dd44d772e2286123ceffb6bd8236b8be7e4eb3e1308", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "1bdef34453a83d19748b02d964c43409a66535678b6234c42d289ed2b7571bf60d35ba7a43cd951d4fc9fc2e8ff618038a2edc9cb0181dcac0b62a0859aafd8d9993aa3069c14fec11cb66a653311a37861d225847bf535bcb920b4f0ea98b8b", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 }, { - "pubkey": "227a5a5ec0c58171b7f4ee9ecc304ea7b176fb626741a25c967add76d6cd361d6995929f9b60a96237381091cefb1b061225e5bb930b40494a5ac9d7524fd67dfe478e5ccd80f17b093cff5722025761fb0217c39dbd5ae45e01eb5a3113be93", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "3bd60bd8c5ace7d1999cd4bfd93dcb7bdc397d8d84efa5fd89439b7b727b0331bd00e3feae85df79e7d2b5eba1ea07003972fde3a7eb8d4ba25583a848be812e89b75fe8f3531d810ba2aaef629748ace6ac5ae73d8a2e6a65bb379f5be3b906", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 + }, + { + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "2b9a1e5e291471f9eb0cf1a52db991f2dbb85446d132f47381b7912b041be00cccab748c25bdd6165cd6a517b6e99b0133bda4dc091dcdf6d17bc460ac0e8c6fe4b2460a980dd8dea8c857647bc5826a2b77fc8ba92c02deb2ba3daafb4d5407", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 + }, + { + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "e464c43c4f3442ec520d4a6464d7fa96397ab711adf38b5a96f208303337f6a97ffdbd22e34a10deef6aa21ff078360d2bf7fae627a1ec55a9f120b35224b8e461b0f4de7d3ce800e6b910f37c4d03cce7039ce3a4a3a79ac0511c36435ccf85", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 + }, + { + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "dab5c34e66e096d1f328fd9b045136724c8ccbf7b5a4bcf1e8c9edc9510712c0a7feff7818563aa799b37f1cdcfb330cc49d8482c7154988d33f63fe2526b27945326112c832fdf72a1b35f10da34c6e08b4079d9c56195c1ab64c84eab93b95", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 + }, + { + "info": "multikey - group1 - wallet2 with 6 BLS keys", + "pubkey": "a5624bec34e06d5026a334654be9e0118c8a02720a6bd868a51d5eb687819442cded09d1fef2e9d9db8bb2d5be01f1148b4819aee9e6a48b9c530285dbc4d800f4dd10d7f9a75d4b36de8fb52aec672cec91e0256f7e9848b10219748d9e708b", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet2 with 6 BLS keys", + "pubkey": "eedbc5a3141b92148205d178d150905e68ca745ba54617025f84b33c91233afda2c2109084821e14f9a50d3f220fbc000ce6ad432f2a1865da9c6547016ecc7e07242ef490c0bdda29ec677f3e833f54eb2cf27e95b10b8edbdfa7de4e1bc000", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet2 with 6 BLS keys", + "pubkey": 
"48c77bbf5d619fb13d76883736c664493b323c00f89131c9d70b3e4ac875103bd8248e790e47a82c9fdcd46fe33b52093a4b3b248ce20e6f958acd22dfb17335fcaf752bab5e29934f0a7e0af54fb2f51a9e6b1be30abdd701f7c9fbd0ad5d8e", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet2 with 6 BLS keys", + "pubkey": "dd59e326a160d3b49ca25f2ab93b4f7ba766b124de66b68507b9e7a9cf69df7c4fca695592eb31e7e63061daef52d30cc1d362fc612d22631398cad4af46969e35407b293808133fc130b8f930ba41c6b88bc9ed9b884892113593d3ffc55297", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet2 with 6 BLS keys", + "pubkey": "6b1a8bf3e5e7bacaaf99e3c89891a8a4ec7e9022986043f8206db9171ede3bd8cdcbbd7e8e1234180de5d651110ef706a8d964cb35048bc961611b55c8d9bd1b942b93c7e1b88157e7f79f2c08dbabe1af4612afe6044ab1be316976111b7019", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet2 with 6 BLS keys", + "pubkey": "d8f8ef204ac892dd1a04648fc449ffdd11418dbd5c8fe623e5efda0bcae47cb41eb99c981585d80be1d668d8b7466619b6ead4d83976cc4f6879627a455603a74ab2adbfb5fed0f1a2b954363d97cbd3ac7feb284c83ac64422fad518e589c8e", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet3 with 4 BLS keys", + "pubkey": "f547e115b1ada7cf9b8aeef45ee0d9ec4b206315ef44be706d994a0571688cd96291d1ab6c3761df29d00a2ba290a3185e4796bc49891906f86e16da01af3fd52320944b96b60e679ac8e686d4819e97e15e5fe46503c556b4acdd8079624005", + "address": "erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet3 with 4 BLS keys", + "pubkey": "0e5cc5f218a8fa9bae1f6b452430c0de205e6b251d0f1d606d1ce28203fb556768e6c4545ce8e90d640ef2cc1062f40ccf2ede124b926cbf3b2b0050b0e19f67e7e36ac1a7049178a77cbd65ee30cd0a40d9f98846ce439cc120717501f03180", + "address": "erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet3 with 4 BLS keys", + "pubkey": "760dc22525dce5be65a3a55ee07f7f012e0a89f435daec56eb475b0b5ca2d84b157894b8df64dfb570ecc633d5e1611639d43976e29f11c232236a9548b0145ee4e43fe495252c8c1f006b8df51d3835dee64a826f43167096b347b6919aa292", + "address": "erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet3 with 4 BLS keys", + "pubkey": "39316473217f7c435a543efa078254198dd079e3c6505e7cc1564b033de8a161dc2e9c392b1e584440510113b5942816102d7be5f4af9461af21a454fc1938a962b256c1c1d1f939198029ed0bf22c62893038d5687787cb46436c0ef4f12417", + "address": "erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet4 with 4 BLS keys", + "pubkey": "6a3b9f6b5fd38e79433daa9bf03543314f8a2a3d9f1fec8ebe2bc1ee97f135d83845dcecd201207c1b31d7624ddb330ae67fbfab4137cd734d96bc0975ae8bcfeecc4441b384d39d6900cdb7436450c23b4cc7674ec50055ea4a90861c503a91", + "address": "erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet4 with 4 BLS keys", + "pubkey": 
"ee8c987b9af9bba2912763fb7fcd6d6575db60806c5041fa91816ecc339ccfd60bf3cf49fb7017158f0b8e6050276907620bc040816207f14a952bb86752816231ae7f31ff701862cfe0abca367fc4cd63bafd4ad6e4df67612e4ec71462650c", + "address": "erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet4 with 4 BLS keys", + "pubkey": "cfe96f6b010d08f211c83f4ae3eb451d1d5205a50bdcd451706044dc21f523d25f214ab89dd5aab7ae03111197d6e6156e70ab348c9b0fab0a7839ea57fef6cd2324882b4387014dba201e6f87d5ca395e14d900e4563494f4f11a69ef6cdf14", + "address": "erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet4 with 4 BLS keys", + "pubkey": "05e9e43732ecff55e553b35b5ee1416065818db162a6fbf096186a1230d88bd057cebb72c5afaec16a803c4c4f69770752fe29be73a4069d0d01666ede963271192d4f324f2b3dcaec8b2c871c23cf185579a039dd5ab093c7cd9bca53e09c85", + "address": "erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet5 with 3 BLS keys", + "pubkey": "82cfc47999d1499cb880d46e8280b8c4fe576dff20a8ca6f6ac551887c637f935153f9ce2f21921a532477535d42ac05f730760c78415756add2eab6d57d94916f3ad51590b23404739d152f89b6d052df48cace1793897cd4eba722247a6195", + "address": "erd1dzjes5c6a8ru45clgla3q0k3ezm06svefjz7vzs8pjfnrqa8tcasl4j8hs", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet5 with 3 BLS keys", + "pubkey": "7675098e73574db9c59bdce61e4b80251c6536201715dca40b2b69c09ce097690f3a9095d22b006531e3b13b30894803bd7ede3e6d80c9064c431f8671db085ab1052354cb26a7a2436340b273b6c95c84ab94bb9531b99c5f883602b5284017", + "address": "erd1dzjes5c6a8ru45clgla3q0k3ezm06svefjz7vzs8pjfnrqa8tcasl4j8hs", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet5 with 3 BLS keys", + "pubkey": "c50f398a853c670ed625a12eddae175df5a90e034a54484a832566fc91f9b83d5daf1bc821cc347ba7e45f3acd4e1d00d0d7f52235824fd1326a7f370b58fc7dd98edfff4a41739a2015c6ed3a3c0bf3c986efeee187be70f1133fc4379dad95", + "address": "erd1dzjes5c6a8ru45clgla3q0k3ezm06svefjz7vzs8pjfnrqa8tcasl4j8hs", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet6 with 3 BLS keys", + "pubkey": "4bd3f30f608b22b32100c6360def228ec95aa24e3048010bb64606392f602e180a0b2a12f7f92ef1d7f73ce1271ae30693bec692b15802c7ba079939640570fdc7f4d411c084ed0fe612ee223227ca3d02dc9732cf686ba8885007de53f8ec89", + "address": "erd14gg3v6j4505ucx7t2wtl98tgupmyp748aq92jefmp5ha6e3pccgq9clwe9", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet6 with 3 BLS keys", + "pubkey": "71e8458f92997a00c1cd0e638b9ec42ab136828fc13f0ec643b60af451270cc81d50f4c4578a7c93a700ee21e065281593e7995d2454356cbfdeadb9ffe7bf33ba8f7a31a1d2e76bba5a5f88a613ef37e35595838d0b7f4bd12da7d6fe743499", + "address": "erd14gg3v6j4505ucx7t2wtl98tgupmyp748aq92jefmp5ha6e3pccgq9clwe9", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet6 with 3 BLS keys", + "pubkey": "d9c30948bffad18776b786f6367142b76605ac6e33a8d38c68c31c7afb099f1a83efb752a87afaf9d04a4a8fb656e40bfe2a4aa6e0c16b82d22bd6c232c2ce5e6672ac6232d2da6945bc033b04cbaaeb4b9af4b29585094e034ab8dcfb8b9c19", + "address": "erd14gg3v6j4505ucx7t2wtl98tgupmyp748aq92jefmp5ha6e3pccgq9clwe9", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet7 with 2 BLS keys", + "pubkey": 
"55fc7ab2e8c0a07bef2e1a9b35764cee1d604cb5634b7226a7310ce56a1f02e99d248fc5b416c4253ac7b88353b1a60f31e1104534e36cb00f46bdcb20a0d24f453e2c8d3cc48dc3c6086edbe16149aae14eb3a4d24ee2b217a4759bc0c0ea88", + "address": "erd1xdfc44mk4ut5cv6l3mq0py6h88cty9ykacskm8xv3tvrp893kmxqppcefg", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet7 with 2 BLS keys", + "pubkey": "a8e662e63ad0e87f2dc66cbed41d398b73a2da2aaced6cc466ed378b62daee28b3db8e8327a06278a094b05840965c17448ffc8a1c96e532a7960d1a15d2cabd16edadc476bfb4af3a825aff801f615d127b70b4745b88e01627a99ba52d5317", + "address": "erd1xdfc44mk4ut5cv6l3mq0py6h88cty9ykacskm8xv3tvrp893kmxqppcefg", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet8 with 2 BLS keys", + "pubkey": "f5e5eb9dd18aeb5d4829ab08795a9e4c8632a4fd248feed68382add1f2474d3cec042d51b897871bfee1f1c1fbeabf13d1c39d4f9b412948d27737f2b82e85474b7049a700ee8735373564791f0d20692dd1f8b494de7bab0a8415f01532ed90", + "address": "erd1997jfwzrum4rrk59ar5supcyge9rpa73xgv2p45h3unt880v399svt8c9g", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet8 with 2 BLS keys", + "pubkey": "e66d7ac5e51a382164aeaae9924dda3272296a145d3c6178b962e3b7eb83e75515e665c327e86f3ef597ca840f8c5c0ace19ac9a8fbcdc573f9237d112fb1c467d646737863ccd1fe61f4c4341f9805f8e1fe98348b50c3c3f93f62de3975980", + "address": "erd1997jfwzrum4rrk59ar5supcyge9rpa73xgv2p45h3unt880v399svt8c9g", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet9", + "pubkey": "2a1c49643e564cdf28bba96dfd6cd8ad38a5958b2b3c9a8293ffb54e9df0a0188a67de2fb947b8ae3dd06b7411aaae0e8bedef795ad3b35ac9f1402dcd0631d9d530b01b3880362fbd3ed9a8488ecabfb1b46cac225c5d48c39be3e28503f90f", + "address": "erd1e9cg9ys8fh77n9eaxpg47sxaes4fe9g2nvy6a65qpxykcx8grg9sv45lss", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet10", + "pubkey": "5c9784523f360a802d4687c9c76bcef41a738d034aa8503a055c33898504b09670c1f637ca632e5290b3acf79a2191072f68c4192a9cbeb34f50c4a941e34247a64f642a6a074bec683bdfb83587cfdc0390ebd74505cb836cf04f3268e32f99", + "address": "erd1xdrltsygywhmtxzsmrgjlsxsxrf4y2ayv0z50y666dgsp66trxwqzajk96", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet11", + "pubkey": "db7f7726c3e68abb28d070f529f2c222755d863aa9d7c0200fde10c93ccb8edcee8d45c9eb925bd5a0fa33c54d19270b7058f6e72256dad84214375f189310a73153cd84feef4b493ab61437b0cbcc2c592e6c093653a533631c8e0ab036c207", + "address": "erd1lytewufjflpwl6gtf0faazjr59nd2fhfwlk7ew72hkpgdkmunl8qfrpywg", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet12", + "pubkey": "a27b6f47c53263e5c8d69779a169d50605cdd7ddb4b5384f2d46e08ace6f787a60f6cf26256b62fafba9c91a87ff070bc99254fcb5a73239fc14f2108de62189005b51b21e2922b37c6cc657017832e3a59dfcc7a54ac5dcb997136da4e2748b", + "address": "erd1s8tqztm4u4gw23489lps97qxe8vck8eln3a424y9c6yujsc96nas0l968d", + "initialRating": 5000001 + }, + { + "info": "single key 1 - wallet13", + "pubkey": "d3e0427c22ff9cc80ef4156f976644cfa25c54e5a69ed199132053f8cbbfddd4eb15a2f732a3c9b392169c8b1d060e0b5ab0d88b4dd7b4010fa051a17ef81bdbace5e68025965b00bf48e14a9ec8d8e2a8bcc9e62f97ddac3268f6b805f7b80e", + "address": "erd1p7p0f3n8dxtj08hsp9hccqg932pd4f94rq3adg6g55etx8g4z8tsmg5e0g", + "initialRating": 5000001 + }, + { + "info": "single key 2 - wallet14", + "pubkey": "b0b6349b3f693e08c433970d10efb2fe943eac4057a945146bee5fd163687f4e1800d541aa0f11bf9e4cb6552f512e126068e68eb471d18fcc477ddfe0b9b3334f34e30d8b7b2c08f914f4ae54454f75fb28922ba9fd28785bcadc627031fa8a", + "address": 
"erd1uyeel03ea837dphrx2ak77hdvlhjdcqdwgyg6k99gqn602ymsn7qptmedj", + "initialRating": 5000001 + }, + { + "info": "single key 3 - wallet15", + "pubkey": "67c301358a41bef74df2ae6aa9914e3a5e7a4b528bbd19596cca4b2fd97a62ab2c0a88b02adf1c5973a82c7544cdc40539ae62a9ac05351cfc59c300bbf4492f4266c550987355c39cff8e84ff74e012c7fd372c240eeb916ef87eead82ffd98", + "address": "erd1ftyzkdhl7rl782mrzrdc2jck3egydp0ydzhcjm9gc8s2jym5egrqadl4h6", + "initialRating": 5000001 + }, + { + "info": "single key 4 - wallet16 with 3 BLS keys", + "pubkey": "ab0a22ba2be6560af8520208393381760f9d4f69fca4f152b0a3fe7b124dd7f932fd8c1fbb372792c235baafac36030ceaf6ebf215de4e8d8d239f347f2fed10a75a07cbf9dc56efbbfca2e319152a363df122c300cdeb2faa02a61ebefd8a0e", + "address": "erd1rsl2sj5g87ltfq0hvrmgm35mlg4lzfs29p4gzxh0lh4vj2e8ykuqh69lha", + "initialRating": 5000001 + }, + { + "info": "single key 5 - wallet16 with 3 BLS keys", + "pubkey": "caa87d67e195b52355d2c8f7f74c829395b134bd4a911f158e04b2d7e66a5ba195265743f10cf190105512fb3df9d708a8056c07a6165874d8749742502c0eada7d15b6c55f22c2cce2cf5001288f6b2d89319e6ff888344c01adcd362be8998", + "address": "erd1rsl2sj5g87ltfq0hvrmgm35mlg4lzfs29p4gzxh0lh4vj2e8ykuqh69lha", + "initialRating": 5000001 + }, + { + "info": "single key 6 - wallet16 with 3 BLS keys", + "pubkey": "598be7548d6bb605bd19d83037bf58a7797a4e48b33011a60a5633cf6fe8d59906130777c46f50a50d3d0f958effb5147befd5d67cbec7c5daddeaade4dca5d8a54fe0394fde7b6455e4fc4db91f33f907d450b45fc2d4a9990f96d893093d91", + "address": "erd1rsl2sj5g87ltfq0hvrmgm35mlg4lzfs29p4gzxh0lh4vj2e8ykuqh69lha", + "initialRating": 5000001 + }, + { + "info": "single key 7 - wallet17 with 2 BLS keys", + "pubkey": "69b277b127d025638dbb54d36baa8321540f6210fc5edaac77f94798c039a383aead3ae7c93cdfb8b4caab93a952d101ee2322c129b6ce2726359a65aa326bd35e54c974118503944fcaf80be80b5c3fc9cf86d574d0096140f16fbc55fc4984", + "address": "erd19yrjty2l4ytl6d3jynp5mqfekq4uf2x93akz60w7l3cp6qzny3psnfyerw", + "initialRating": 5000001 + }, + { + "info": "single key 8 - wallet17 with 2 BLS keys", + "pubkey": "a006ad94b28c414c6ec0a5effb84594f39ede4f82b60aa077e2065b89407c78dd6479ebceed7bd42ed2779c34b718f11651427e550948cb8be2e6cea03a128ac3c52e599ada6f34912b119f94de472af0397a68769f1b3f647e87090918e030b", + "address": "erd19yrjty2l4ytl6d3jynp5mqfekq4uf2x93akz60w7l3cp6qzny3psnfyerw", + "initialRating": 5000001 + }, + { + "info": "single key 9 - wallet18", + "pubkey": "91874fdfa8dfb85faf4f404b21c95fbb5d154db5a6abe46bd7860de9e5ddb78b61b5c6ddcf86e5ec8a237e130ed0fc0e418fb97d6fce5f6642ba33f99eff694ec7fb2921b423899a9a5888914bd625636a9b1ea186566561cd35b79aaca20e88", + "address": "erd148lq42zdzz34y0yr8avldsy7gw0rmuvj4lmstzug77v08z3q0ncszfk8w9", + "initialRating": 5000001 + }, + { + "info": "single key 10 - wallet19", + "pubkey": "cc3e0c1021f8c4c092499547b064cffef19d07f0bf250e5265cea1e49b282a7f6efb4b415ad37db2ef6efa253475f511e74efc2f76c087c9798f72187986bb752f61d0ac220045f8e2d945343f3bbb8ef34a6025fb855dd7d953a81477ad2309", + "address": "erd1k2v4h3805gnxf78c22g7lfe4pgq2lmr4ezmkk2rqkej6yjd7g5ssu88fme", + "initialRating": 5000001 + }, + { + "info": "single key 11 - wallet20", + "pubkey": "c2885340a6ba4341d68f80ce419deadf374bc52e2749c278b5bce5f795e9a90a04ef4f07a0b47777feb1982749b57a174b4927338df9da99a417a2df3152a9ebaf3465bfc092058324edf6892313f24be4612eb5663bb59d67a831dda135aa8b", + "address": "erd1nzjyj2ykpway04pczl42fgrlza2f0eaf97fxgnuuw39vyee36xlqccc3qz", + "initialRating": 5000001 + }, + { + "info": "single key 12 - wallet21", + "pubkey": 
"cf8a2f97b7822acb16016a6debaaedea39959c9ac60b80e50f24734a0e0f6128ed1d216f5aed71866ca34bb30b6e8300e7995237744e766f6016ca28d4ebb2274326cb7af1a3c12d795cc127a4bf9aa9497d89ef0450c40f675afd1afa761012", + "address": "erd1yp0nvml5c45us3qzreqxkjxaakxn744t3gdva9s8xndcakzawutstepmm5", + "initialRating": 5000001 + }, + { + "info": "single key 13 - wallet22", + "pubkey": "95a81b70474d59c1292bc5742db1a7b9bf03cb516ede6fb5cb3489ee812de8cccfc648f3ff3cda26106396a38c1c1f183b722392397a752d949c5123888b7a8ec012fe518f6efc25015a620b1559e4609286b52921e06b79fd563a9b3b4c4e16", + "address": "erd1qyg80tr4rd65ur3hedm9h4yv3fcwmm6vnyrypnm972nd80889hxqdfgwrc", + "initialRating": 5000001 + }, + { + "info": "single key 14 - wallet23", + "pubkey": "5909def579148f456e8490659b859f80f8ccd62b5adda411e1acdc615c2ec795a88632cf2ec210a56ba91973fd3f07160f559f82f7afaafee008679fefb1b0cd2f26f4324197e6239c000accd1c427138568a8a9e276690c154d3df71a1f970c", + "address": "erd14x6d48q59zjh5p909fyw7e46czftgdawyf734cnmgk5e63ghrvvsqp254t", + "initialRating": 5000001 + }, + { + "info": "single key 15 - wallet24", + "pubkey": "58d6cfe7e8c3ec675da17e492c4ba97759fa15fc0f41bbe29d1d49d5f5ca7db142450ada15e1e4bf4657614e26cceb04ed5c0ca17207b0e24c4baf5f91afc092d43a02aaeae76218420817c85292f8de7d3a2b4f3c8615c2bb6a6d1c74267788", + "address": "erd1wyxylus33e476h5kta7e0caeqvgvcgrxh0az33e7szya6g7mh2ws0n27sa", + "initialRating": 5000001 + }, + { + "info": "single key 16 - wallet25", + "pubkey": "eb79770be0ae70e1d6932832eab94117b0c1a2442b3fdb380b1ad5a809b6221a4905e02a628886c925d152c4e5006413fe69d1f11cf543f4802d4ce4e5eac2b18b78a79215c737e2e098b40802044bc6e946b712299286c34f6d33d8b681790d", + "address": "erd1v3ylw7t6vzjzs06xjf6ccmf576ud38g2ws45tjkjg48s38jefpzqlwms9w", + "initialRating": 5000001 + }, + { + "info": "single key 17 - wallet26", + "pubkey": "bc03265a52610464f2f0431a69647be3106924f5bf67cf87cd889bf86d81739b3f0f37bad11ab93c5209dc4496f4130d69a9649596b97884b7e91e0b4d7c59169dd0729ac3e3bcd308efac56bc29d3cc249d8759580ab117943aa40df3baac05", + "address": "erd1twel4azu6uptw878y063p93mjr84y5m4kpsww2aeqj4pg5jeplgst04rhg", + "initialRating": 5000001 + }, + { + "info": "single key 18 - wallet27", + "pubkey": "aa4be8f36c2880ee4d2ca79dbd7a53537e3965f255dfb5c75324fe29fcb6ce56148fbaea334268e413f0df95f580c40fb3484165b2852236e3a1aa68151ac3327d981cfae52d99f9a564bd3139cdd768661854dae78880d9320191cdb2989815", + "address": "erd1q2se75ucl9as9j7e48v00jrnj6hvtk5vqxa4a3ag5729vctsdkasm20cyc", + "initialRating": 5000001 + }, + { + "info": "single key 19 - wallet28", + "pubkey": "3e86fea8365791b3becfc9aa2bc239f6be58725e61e46e7935c56479ad285e0781da1f277980d2e1d0ecff3982f2d90f321aa03f3d934adf260628d0ed0dc81a98dfaf1e6278e042d6c78dc65f2fa79d3b457754a321b8a0d7bf9998feeea817", + "address": "erd18cc6cm35xhv7kzwsm79l4ma6jpz3ee5l0yjxuc66kh6rcgtawtuq6lzp9f", + "initialRating": 5000001 + }, + { + "info": "single key 20 - wallet29", + "pubkey": "aa92cf6e0ac62df09e7adca139c41a162ad668e7a797770b6d195cd9b175d0fca9eac3f4bf859967139f2ba109741a144e3dc5e6ccaeb6cd21f1d202b10f08832274cd9cdf6b10dbc2c60acdd1c70ae9beae2139e2b69eccbcde32a7f3991393", + "address": "erd1psux99h4jljyt3nkw8pruv3spw5r0unqe4wk8837mm9my88gl28qj6mml5", + "initialRating": 5000001 + }, + { + "info": "single key 21 - wallet30", + "pubkey": "f2b7819d1c2e2e1d007edcf896034085645f3c81e7c7fe21aa7ad4f35f8b863ee1db13448d15a3d0d15018f741a991010a9374710b628e41ef078be8a10249f2a3000598432c28186af1c04a219ac914434dca9c27e61485d701505112093f8a", + "address": "erd1vgm89ngmv2ghzsyq8xjtt45crekkxnhsq30yxzlq86uc3ra3r57qa3mw2p", + "initialRating": 5000001 + 
}, + { + "info": "single key 22 - wallet31", + "pubkey": "292742eee9d12dade21b4cd8bcd44c210c26d927ef6dbd9cad59008643a971a86ea6dfce247515d4266789b3fe8e35167278e781e52b4cd7b9781554ba67ecc08680eb19628e7741c94d8456090a08aceab1c8d2ed39bf59e8e282381aa32a0a", + "address": "erd1k767vmmn8vg8xvuny32ppwr4dxrlgmpykn0u7nm92evlag3wkukqdgsf5u", + "initialRating": 5000001 + }, + { "info": "single key 23 - wallet32", + "pubkey": "11f784d2970d65769ce267710b3d08b28b78c3f79283758918c8ef15717ccbe90c23348cafe0e98a5d101b8dafbe7d081c6821dee8bf40ba150664ccc2dbbdd6358c92404e677d82910ce61f1d7584fbbbc9ebf71b7f35a118556e2a5c220501", + "address": "erd1hwe8lskmzsdpuy3f6hldamvn0zrhzldec8m4tt8hupq58d7gyrequy8wsp", + "initialRating": 5000001 + }, + { + "info": "single key 24", + "pubkey": "0382c11222db8a15e42e3ff64893df46c7720b439fb2a546462815ac0a8fa3bed99fceae5da9b68524e36f61cc074d09ceafec274c54f182c56a77583f9421f19c777265c43da1d5747304b36f0367cf3e8e5f63f41dad1a4362d9e1997a9e16", + "address": "erd125eyrjk99zadr04gm9z2p4nckmnegexs5nyk7ek85rut2665t75sql3w88", + "initialRating": 5000001 } ] } diff --git a/cmd/node/config/p2p.toml b/cmd/node/config/p2p.toml index 62d30fd19f7..5d08f4be919 100644 --- a/cmd/node/config/p2p.toml +++ b/cmd/node/config/p2p.toml @@ -23,10 +23,11 @@ [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" # TCP listen address PreventPortReuse = false - [Node.ResourceLimiter] - Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". - ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" - ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" + + [Node.ResourceLimiter] + Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". + ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" + ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" # P2P peer discovery section @@ -48,9 +49,12 @@ # RefreshIntervalInSec represents the time in seconds between querying for new peers RefreshIntervalInSec = 10 - # ProtocolID represents the protocol that this node will advertize to other peers - # To connect to other nodes, those nodes should have the same ProtocolID string - ProtocolID = "/erd/kad/1.0.0" + # ProtocolIDs represents the protocols that this node will advertise to other peers + # To connect to other nodes, those nodes should have at least one common protocol string + ProtocolIDs = [ + "/erd/kad/1.0.0", + "mvx-main", + ] # InitialPeerList represents the list of strings of some known nodes that will bootstrap this node # The address will be in a self-describing addressing format. 
@@ -71,10 +75,10 @@ [Sharding] # The targeted number of peer connections - TargetPeerCount = 36 + TargetPeerCount = 41 MaxIntraShardValidators = 7 MaxCrossShardValidators = 15 - MaxIntraShardObservers = 2 + MaxIntraShardObservers = 7 MaxCrossShardObservers = 3 MaxSeeders = 2 diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index 98d5c02557f..8f3a2343a79 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -8,7 +8,7 @@ # In multikey mode, all bls keys not mentioned in NamedIdentity section will use this one as default NodeDisplayName = "" - # Identity represents the keybase/GitHub identity when the node does not run in multikey mode + # Identity represents the GitHub identity when the node does not run in multikey mode # In multikey mode, all bls keys not mentioned in NamedIdentity section will use this one as default Identity = "" @@ -28,7 +28,7 @@ # ] PreferredConnections = [] - # ConnectionWatcherType represents the type of a connection watcher needed. + # ConnectionWatcherType represents the type of the connection watcher needed. # possible options: # - "disabled" - no connection watching should be made # - "print" - new connection found will be printed in the log file @@ -38,17 +38,22 @@ # so that certain config values need to remain the same during upgrades. # (for example, an Elasticsearch user wants external.toml->ElasticSearchConnector.Enabled to remain true all the time during upgrades, while the default # configuration of the node has the false value) - # The Path indicates what value to change, while Value represents the new value in string format. The node operator must make sure - # to follow the same type of the original value (ex: uint32: "37", float32: "37.0", bool: "true") - # File represents the file name that holds the configuration. Currently, the supported files are: config.toml, external.toml, p2p.toml and enableEpochs.toml + # The Path indicates what value to change, while Value represents the new value. The node operator must make sure + # to follow the same type of the original value (ex: uint32: 37, float32: 37.0, bool: true) + # Also, the Value can be a struct (ex: { StartEpoch = 0, Version = "1.5" }) or an array (ex: [{ StartEpoch = 0, Version = "1.4" }, { StartEpoch = 1, Version = "1.5" }]) + # File represents the file name that holds the configuration. Currently, the supported files are: + # api.toml, config.toml, economics.toml, enableEpochs.toml, enableRounds.toml, external.toml, fullArchiveP2P.toml, p2p.toml, ratings.toml, systemSmartContractsConfig.toml # ------------------------------- # Un-comment and update the following section in order to enable config values overloading # ------------------------------- # OverridableConfigTomlValues = [ - # { File = "config.toml", Path = "StoragePruning.NumEpochsToKeep", Value = "4" }, - # { File = "config.toml", Path = "MiniBlocksStorage.Cache.Name", Value = "MiniBlocksStorage" }, - # { File = "external.toml", Path = "ElasticSearchConnector.Enabled", Value = "true" } - #] + # { File = "config.toml", Path = "StoragePruning.NumEpochsToKeep", Value = 4 }, + # { File = "config.toml", Path = "MiniBlocksStorage.Cache.Name", Value = "MiniBlocksStorage" }, + # { File = "external.toml", Path = "ElasticSearchConnector.Enabled", Value = true }, + # { File = "external.toml", Path = "HostDriversConfig", Value = [ + # { Enabled = false, URL = "127.0.0.1:22111" }, + # ] }, + # ] # BlockProcessingCutoff can be used to stop processing blocks at a certain round, nonce or epoch. 
# This can be useful for snapshotting different stuff and also for debugging purposes. @@ -71,7 +76,7 @@ # NamedIdentity represents an identity that runs nodes on the multikey # There can be multiple identities set on the same node, each one of them having different bls keys, just by duplicating the NamedIdentity [[NamedIdentity]] - # Identity represents the keybase/GitHub identity for the current NamedIdentity + # Identity represents the GitHub identity for the current NamedIdentity Identity = "" # NodeName represents the name that will be given to the names of the current identity NodeName = "" diff --git a/cmd/node/config/systemSmartContractsConfig.toml b/cmd/node/config/systemSmartContractsConfig.toml index 19c5355a775..e71d05799f7 100644 --- a/cmd/node/config/systemSmartContractsConfig.toml +++ b/cmd/node/config/systemSmartContractsConfig.toml @@ -8,9 +8,11 @@ NumRoundsWithoutBleed = 100 MaximumPercentageToBleed = 0.5 BleedPercentagePerRound = 0.00001 - MaxNumberOfNodesForStake = 36 + MaxNumberOfNodesForStake = 64 UnJailValue = "2500000000000000000" #0.1% of genesis node price - ActivateBLSPubKeyMessageVerification = false + ActivateBLSPubKeyMessageVerification = true + StakeLimitPercentage = 1.0 #fraction of value 1 - 100%, for the time being no stake limit + NodeLimitPercentage = 0.1 #fraction of value 0.1 - 10% [ESDTSystemSCConfig] BaseIssuingCost = "5000000000000000000" #5 eGLD @@ -34,9 +36,16 @@ [DelegationManagerSystemSCConfig] MinCreationDeposit = "1250000000000000000000" #1.25K eGLD - MinStakeAmount = "10000000000000000000" #10 eGLD + MinStakeAmount = "1000000000000000000" #1 eGLD ConfigChangeAddress = "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80" #should use a multisign contract instead of a wallet address [DelegationSystemSCConfig] MinServiceFee = 0 MaxServiceFee = 10000 + +# Changing this config is not backwards compatible +[SoftAuctionConfig] + TopUpStep = "10000000000000000000" # 10 EGLD + MinTopUp = "1000000000000000000" # 1 EGLD should be minimum + MaxTopUp = "32000000000000000000000000" # 32 mil EGLD + MaxNumberOfIterations = 100000 # 100k max number of iterations for soft auction config diff --git a/cmd/node/config/testKeys/delegators.pem b/cmd/node/config/testKeys/delegators.pem new file mode 100644 index 00000000000..78f89d05110 --- /dev/null +++ b/cmd/node/config/testKeys/delegators.pem @@ -0,0 +1,50 @@ +-----BEGIN PRIVATE KEY for erd1z48u9l275l2uy4augfytpp2355wvdnc4gwc9ms9gdhdqru3fz9eq5wqe3e----- +MzJlYzk2ZTgxMDMyYzBiZjhmN2UxNjhhODliNTc2MGMxMzM5NmMyNmEyNDhiYzU0 +NjhlMTVmZTlmZDc3NDM4YTE1NGZjMmZkNWVhN2Q1YzI1N2JjNDI0OGIwODU1MWE1 +MWNjNmNmMTU0M2IwNWRjMGE4NmRkYTAxZjIyOTExNzI= +-----END PRIVATE KEY for erd1z48u9l275l2uy4augfytpp2355wvdnc4gwc9ms9gdhdqru3fz9eq5wqe3e----- +-----BEGIN PRIVATE KEY for erd1qm5erxm0va3hcw9jfxtp2gl3u9q9p4k242pk9xx3vezefkgj2vhs0wk8cx----- +NWI5ODczMjc4YjExNmFkMmE2NjY2NTI2MmVmNDhlN2FlYWM4OWRlMTAyMDhkZGEw +ODdmMWVjMThkMDBkMzc5NTA2ZTk5MTliNmY2NzYzN2MzOGIyNDk5NjE1MjNmMWUx +NDA1MGQ2Y2FhYTgzNjI5OGQxNjY0NTk0ZDkxMjUzMmY= +-----END PRIVATE KEY for erd1qm5erxm0va3hcw9jfxtp2gl3u9q9p4k242pk9xx3vezefkgj2vhs0wk8cx----- +-----BEGIN PRIVATE KEY for erd1rqwt9vpfn072tahtcvup2dxz4nvqfs3n5p9eh0jnypkppxmdheaqpcqfzz----- +MmFjNGZlYTlkNmI3OWQ5ZGU5MjkyZmZlZGE4ZjkwYWNmODQzNzVmZDIwOGEyMjkz +YjcxN2JhNWI1ZWI1MjQ3ZjE4MWNiMmIwMjk5YmZjYTVmNmViYzMzODE1MzRjMmFj +ZDgwNGMyMzNhMDRiOWJiZTUzMjA2YzEwOWI2ZGJlN2E= +-----END PRIVATE KEY for erd1rqwt9vpfn072tahtcvup2dxz4nvqfs3n5p9eh0jnypkppxmdheaqpcqfzz----- +-----BEGIN PRIVATE KEY for 
erd17uygg7qq05mjhectgymj6fwq59ysr4p92cy0yz3jrxxj6253p40sj77wr6----- +MzU0N2M5MWRhNzRhMTY5MDZjMzhkMTY5ODQ4MWRiOGI1Zjk0YWJjM2VlNDgyMjY5 +ZDhjMDEzMTdlOWVlYWUxYWY3MDg4NDc4MDA3ZDM3MmJlNzBiNDEzNzJkMjVjMGEx +NDkwMWQ0MjU1NjA4ZjIwYTMyMTk4ZDJkMmE5MTBkNWY= +-----END PRIVATE KEY for erd17uygg7qq05mjhectgymj6fwq59ysr4p92cy0yz3jrxxj6253p40sj77wr6----- +-----BEGIN PRIVATE KEY for erd1qslr87nj5gkv2396js3fa2za5kqgwugqnz4j4qqh22mxpnse2lws8srsq6----- +MDc5N2IzZDFmMmY1YzYzOTYxYjdhMThmNmI2OWZlNDk0NmJkNjUyOGFhNjU3ZTQw +Zjg3NjY2MmM3MmNhMWQ3ODA0M2UzM2ZhNzJhMjJjYzU0NGJhOTQyMjllYTg1ZGE1 +ODA4NzcxMDA5OGFiMmE4MDE3NTJiNjYwY2UxOTU3ZGQ= +-----END PRIVATE KEY for erd1qslr87nj5gkv2396js3fa2za5kqgwugqnz4j4qqh22mxpnse2lws8srsq6----- +-----BEGIN PRIVATE KEY for erd17pjlqg55c6v3fjvpqec8peefk74g8neygr84ymw7cqzudmzaw7lqnln7sz----- +NTBkMzFiMzdmOWMyM2NmMWMyYjgzZThkNGRmYzgzNjU5OTkyOGIxMzVhZDI3OGQ0 +Yzk5Y2UyZjFhMDUzMjI4YWYwNjVmMDIyOTRjNjk5MTRjOTgxMDY3MDcwZTcyOWI3 +YWE4M2NmMjQ0MGNmNTI2ZGRlYzAwNWM2ZWM1ZDc3YmU= +-----END PRIVATE KEY for erd17pjlqg55c6v3fjvpqec8peefk74g8neygr84ymw7cqzudmzaw7lqnln7sz----- +-----BEGIN PRIVATE KEY for erd19ztfuew6ejrwq5mpax4xztjwh5j63u9vge4dum9vlyy7hg3pc86qgmt6nm----- +Mjg2ZjFlOGJmNDY5OGU3ODMwNzc2YTRjZjdiMDcwZDNhZGUzYzQyMzgwN2U1ODdk +MTYxN2Y3NDBlNWZiYzU3MjI4OTY5ZTY1ZGFjYzg2ZTA1MzYxZTlhYTYxMmU0ZWJk +MjVhOGYwYWM0NjZhZGU2Y2FjZjkwOWViYTIyMWMxZjQ= +-----END PRIVATE KEY for erd19ztfuew6ejrwq5mpax4xztjwh5j63u9vge4dum9vlyy7hg3pc86qgmt6nm----- +-----BEGIN PRIVATE KEY for erd1age5t5qfrke4vm47vaq9a4yewllh6227qm4fcy3rc7g5ktmzyatsgf4wcw----- +MzkzMDA1MjU2OWY2MDhkYjYxOGI1NDYzMmI1ZWFkZGNhYmJhODQ2NGJmMjY4NWU4 +YmU0YWY5MDNkNzAwYTQ0NGVhMzM0NWQwMDkxZGIzNTY2ZWJlNjc0MDVlZDQ5OTc3 +ZmY3ZDI5NWUwNmVhOWMxMjIzYzc5MTRiMmY2MjI3NTc= +-----END PRIVATE KEY for erd1age5t5qfrke4vm47vaq9a4yewllh6227qm4fcy3rc7g5ktmzyatsgf4wcw----- +-----BEGIN PRIVATE KEY for erd1jt0vv29trqs3nddzkxsf950xx0t5uvyncmuamwryneh9wsee5wpsgue96d----- +NTBlZDI0NzM3ZWNlOGEzYWZlYjJlZTY3N2NiNzUxYWI0ZTA4OWNhMGY3ODhlNjNj +MmVhNWQzMGE2MmMzNmE4ZTkyZGVjNjI4YWIxODIxMTliNWEyYjFhMDkyZDFlNjMz +ZDc0ZTMwOTNjNmY5ZGRiODY0OWU2ZTU3NDMzOWEzODM= +-----END PRIVATE KEY for erd1jt0vv29trqs3nddzkxsf950xx0t5uvyncmuamwryneh9wsee5wpsgue96d----- +-----BEGIN PRIVATE KEY for erd1c83rmk3n8ys4g9dkg3q70thx3v787qtpfmk23epu4xsadpyd3dnsejf2r7----- +OGQ1MDBkNjg3NTA4MWU0Y2JjODk5ZTMxNmYwOGVmMDVkZDMyODRkMWFhZDUzYmJk +NGRmYmY4MTAyMzEyYmY4YmMxZTIzZGRhMzMzOTIxNTQxNWI2NDQ0MWU3YWVlNjhi +M2M3ZjAxNjE0ZWVjYThlNDNjYTlhMWQ2ODQ4ZDhiNjc= +-----END PRIVATE KEY for erd1c83rmk3n8ys4g9dkg3q70thx3v787qtpfmk23epu4xsadpyd3dnsejf2r7----- diff --git a/cmd/node/config/testKeys/group1/allValidatorsKeys.pem b/cmd/node/config/testKeys/group1/allValidatorsKeys.pem new file mode 100644 index 00000000000..0a34418f748 --- /dev/null +++ b/cmd/node/config/testKeys/group1/allValidatorsKeys.pem @@ -0,0 +1,60 @@ +-----BEGIN PRIVATE KEY for 309198fd7b6bccb1b6279ea2e5c5f2a33b9f6fe5f23180778d8721710344989b07d10dd1bd19307b3cd06eab9d1358062511610ccdad681ae1165016256cc1fdc0fed5041a5c29d7773b2994022bf2dc9efb937b3bb7cc9d72670448fad7d091----- +NmRjMzcwNGQ0YzhkOTcyM2I2MjBmZmUwOTkyNDk5ODhiNzc3NmRiMDliYTI3NjAx +MWY1MTc1ZWM1ZTZlNWIzNg== +-----END PRIVATE KEY for 309198fd7b6bccb1b6279ea2e5c5f2a33b9f6fe5f23180778d8721710344989b07d10dd1bd19307b3cd06eab9d1358062511610ccdad681ae1165016256cc1fdc0fed5041a5c29d7773b2994022bf2dc9efb937b3bb7cc9d72670448fad7d091----- +-----BEGIN PRIVATE KEY for 
cd8cc9d70a8f78df0922470f1ebee727f57a70fb0571c0b512475c8c7d3ce1d9b70dd28e6582c038b18d05f8fbd6ac0a167a38614c40353b32ef47896d13c45bde57e86d87dd6e6f73420db93aeb79a1fd8f4e297f70478685a38ed73e49598f----- +ZGMyYmYxYzVjNzY1OTI2MjVmZGVmNzFkNGJiNjlkZTFiYmNkMGIyZmUwYWU4NzY2 +YzQyMmFmMjM1NmQ2MWY2OA== +-----END PRIVATE KEY for cd8cc9d70a8f78df0922470f1ebee727f57a70fb0571c0b512475c8c7d3ce1d9b70dd28e6582c038b18d05f8fbd6ac0a167a38614c40353b32ef47896d13c45bde57e86d87dd6e6f73420db93aeb79a1fd8f4e297f70478685a38ed73e49598f----- +-----BEGIN PRIVATE KEY for e441b8a7d35545f7a02f0d57d144c89810d67ecb9abb5804d1bcfd8b270dc30978c56fbf9618e9a6621ca4e6c545c90807f7fe5edfd34ccab83db7bc0bd68536bb65403503d213c9763ec2137d80081579bb1b327f22c677bdb19891f4aae980----- +MTA0NWEwNjFlYzVmY2E5NWZiZmQwYmY2YWJjYjRiNDM4ODI0M2U0MzdjZTAwZTZl +ZTMzYTcxY2MyZThlNTQxMw== +-----END PRIVATE KEY for e441b8a7d35545f7a02f0d57d144c89810d67ecb9abb5804d1bcfd8b270dc30978c56fbf9618e9a6621ca4e6c545c90807f7fe5edfd34ccab83db7bc0bd68536bb65403503d213c9763ec2137d80081579bb1b327f22c677bdb19891f4aae980----- +-----BEGIN PRIVATE KEY for 72d8341713a271d510c7dfd02455ef86e9af4c66e06ac28fbb4c4e8df1e944e685bae2bee2af4101c19922b64e44e40b5d4905755594a719c1d50dc210515495d0de9b3a1d4ed42fd45a973353fe2c2548c45bb2157d8bf68e0937cc20fe1011----- +YzFkZWY5YTY3YTBhYmI1MzVjNjYyYjE1MTIwMjA2NjgwZTc0MjBhODYyNTkyZjRi +NTQ2NjE5NDM0YTBlOTI2Nw== +-----END PRIVATE KEY for 72d8341713a271d510c7dfd02455ef86e9af4c66e06ac28fbb4c4e8df1e944e685bae2bee2af4101c19922b64e44e40b5d4905755594a719c1d50dc210515495d0de9b3a1d4ed42fd45a973353fe2c2548c45bb2157d8bf68e0937cc20fe1011----- +-----BEGIN PRIVATE KEY for 796d2f56ee9fa8f4ff9fc7735bbb4d644be8dd044f7f965362aed3d562e00b11f730b2fe21eb9c9fd100b68a5d3dbf07bae63d25739f1304ab638330f0e8c207a76de648e2523ad1693f4ffe9def8a1d5aca2c6c1c14e1fcc20089db069d1a0e----- +YzUyY2M3YzVkY2Y5MWZkMDgyZDcwZDZlZDg0NWY1YWZkZDNiODRiZWFjOWE4MTU3 +YWFiYTAxNTQ1ODIxMmUxOQ== +-----END PRIVATE KEY for 796d2f56ee9fa8f4ff9fc7735bbb4d644be8dd044f7f965362aed3d562e00b11f730b2fe21eb9c9fd100b68a5d3dbf07bae63d25739f1304ab638330f0e8c207a76de648e2523ad1693f4ffe9def8a1d5aca2c6c1c14e1fcc20089db069d1a0e----- +-----BEGIN PRIVATE KEY for 4cc0bfcdf4515927e3ae7890197bfd7c2e3c6f80ff7570fc6313d095e0d4e518ecc81db7087fefb8af166723a32ded1324f4ee00a6a97d206a024fd95ab60f7fe18c896c829ac43782c56d922415fb4ddbc0384936e3f860feb0666da43bcd19----- +NWM4OTQzMjExNWU1ZjVkMGI2YzEzOGI4MjI2MjVlZmM2MDk2NzIyNWRmMThlNzVj +MTFhMTYzMGM5MmRlOTI1YQ== +-----END PRIVATE KEY for 4cc0bfcdf4515927e3ae7890197bfd7c2e3c6f80ff7570fc6313d095e0d4e518ecc81db7087fefb8af166723a32ded1324f4ee00a6a97d206a024fd95ab60f7fe18c896c829ac43782c56d922415fb4ddbc0384936e3f860feb0666da43bcd19----- +-----BEGIN PRIVATE KEY for 3449c9d672472ea52e83be4a8d6ce55785044233d848ac0a9a4b7fc22bf5b1bf5776f95f4b459f4e4cd3ba1d20b46a197c5a55ec00fa8d795d35f2232a6fae360129d186e3a0c219d42d8357de2823c8566da7841c2739b30d1581c4a38ec80e----- +NjIwZjkzZGZhMmQ1ZWY2NzliY2EzYTQ1MzE2NTg1ODU2OTVjNDM5NzM2NTgzNTJk +ZGM2OWU0MjQ4ZGQxNjQ0NQ== +-----END PRIVATE KEY for 3449c9d672472ea52e83be4a8d6ce55785044233d848ac0a9a4b7fc22bf5b1bf5776f95f4b459f4e4cd3ba1d20b46a197c5a55ec00fa8d795d35f2232a6fae360129d186e3a0c219d42d8357de2823c8566da7841c2739b30d1581c4a38ec80e----- +-----BEGIN PRIVATE KEY for 1bdef34453a83d19748b02d964c43409a66535678b6234c42d289ed2b7571bf60d35ba7a43cd951d4fc9fc2e8ff618038a2edc9cb0181dcac0b62a0859aafd8d9993aa3069c14fec11cb66a653311a37861d225847bf535bcb920b4f0ea98b8b----- +YWNkNzhjNzk2OTc5YjIxMTk5ZDc0YzgwNmExNzE1Y2EyNjNiMGMyNDI2MzFhZmNi +YzdlODNmYTRmMzFkNjMzMw== +-----END 
PRIVATE KEY for 1bdef34453a83d19748b02d964c43409a66535678b6234c42d289ed2b7571bf60d35ba7a43cd951d4fc9fc2e8ff618038a2edc9cb0181dcac0b62a0859aafd8d9993aa3069c14fec11cb66a653311a37861d225847bf535bcb920b4f0ea98b8b----- +-----BEGIN PRIVATE KEY for 3bd60bd8c5ace7d1999cd4bfd93dcb7bdc397d8d84efa5fd89439b7b727b0331bd00e3feae85df79e7d2b5eba1ea07003972fde3a7eb8d4ba25583a848be812e89b75fe8f3531d810ba2aaef629748ace6ac5ae73d8a2e6a65bb379f5be3b906----- +YWQ0ODk0ZmIzYjhkOTBiN2QzNTNhN2NhZjc4NTE1MjlhOTRkNjkyMjIyMGU4OTI5 +YzdjODMzOGJiNDRlZWExMw== +-----END PRIVATE KEY for 3bd60bd8c5ace7d1999cd4bfd93dcb7bdc397d8d84efa5fd89439b7b727b0331bd00e3feae85df79e7d2b5eba1ea07003972fde3a7eb8d4ba25583a848be812e89b75fe8f3531d810ba2aaef629748ace6ac5ae73d8a2e6a65bb379f5be3b906----- +-----BEGIN PRIVATE KEY for 2b9a1e5e291471f9eb0cf1a52db991f2dbb85446d132f47381b7912b041be00cccab748c25bdd6165cd6a517b6e99b0133bda4dc091dcdf6d17bc460ac0e8c6fe4b2460a980dd8dea8c857647bc5826a2b77fc8ba92c02deb2ba3daafb4d5407----- +NTk1NTg4YWMyMWI4ZGU4MThjYzdkMDI4NThmZDU4ZDk5NTg3Mjk0NDRiMzk0OWM5 +MzBjYjIwZGEyYWNlZTMzYg== +-----END PRIVATE KEY for 2b9a1e5e291471f9eb0cf1a52db991f2dbb85446d132f47381b7912b041be00cccab748c25bdd6165cd6a517b6e99b0133bda4dc091dcdf6d17bc460ac0e8c6fe4b2460a980dd8dea8c857647bc5826a2b77fc8ba92c02deb2ba3daafb4d5407----- +-----BEGIN PRIVATE KEY for e464c43c4f3442ec520d4a6464d7fa96397ab711adf38b5a96f208303337f6a97ffdbd22e34a10deef6aa21ff078360d2bf7fae627a1ec55a9f120b35224b8e461b0f4de7d3ce800e6b910f37c4d03cce7039ce3a4a3a79ac0511c36435ccf85----- +NDk1MzMwNThiY2VmZjNmOTFmMTRlMTI4MWE0OWRiZDkyYzAwOTVjOTcxMTViMmY3 +Yzk3OWFkNjdjOWVlNjM0YQ== +-----END PRIVATE KEY for e464c43c4f3442ec520d4a6464d7fa96397ab711adf38b5a96f208303337f6a97ffdbd22e34a10deef6aa21ff078360d2bf7fae627a1ec55a9f120b35224b8e461b0f4de7d3ce800e6b910f37c4d03cce7039ce3a4a3a79ac0511c36435ccf85----- +-----BEGIN PRIVATE KEY for dab5c34e66e096d1f328fd9b045136724c8ccbf7b5a4bcf1e8c9edc9510712c0a7feff7818563aa799b37f1cdcfb330cc49d8482c7154988d33f63fe2526b27945326112c832fdf72a1b35f10da34c6e08b4079d9c56195c1ab64c84eab93b95----- +NDZlZDkwNzcwNTQwNjcyZTlmYTQzODUyNzc3YjM0OGM1MmIzNmM3YjAzZGYwMmJk +ZjE0NmM0MTkxMjQwNjE0NQ== +-----END PRIVATE KEY for dab5c34e66e096d1f328fd9b045136724c8ccbf7b5a4bcf1e8c9edc9510712c0a7feff7818563aa799b37f1cdcfb330cc49d8482c7154988d33f63fe2526b27945326112c832fdf72a1b35f10da34c6e08b4079d9c56195c1ab64c84eab93b95----- +-----BEGIN PRIVATE KEY for a5624bec34e06d5026a334654be9e0118c8a02720a6bd868a51d5eb687819442cded09d1fef2e9d9db8bb2d5be01f1148b4819aee9e6a48b9c530285dbc4d800f4dd10d7f9a75d4b36de8fb52aec672cec91e0256f7e9848b10219748d9e708b----- +YzY2MjU0NGU0OWM1YTRkMTdmZjQ4YjZkZjU0YzdkZmUzZWRlY2M1Yjk2ZWM1MjMx +OGRjZjAyZjkwMjdjNTg1ZQ== +-----END PRIVATE KEY for a5624bec34e06d5026a334654be9e0118c8a02720a6bd868a51d5eb687819442cded09d1fef2e9d9db8bb2d5be01f1148b4819aee9e6a48b9c530285dbc4d800f4dd10d7f9a75d4b36de8fb52aec672cec91e0256f7e9848b10219748d9e708b----- +-----BEGIN PRIVATE KEY for 283ccdc58e0df19717ecd0c4c3a553059bf6c8d91b9c7b624afa8cb0564c7fd86e5a199973d17b7b939e63186b25f20a7234ad7f162c8f2547ba2e326c30a6c0571ea04cba83b35fd9a2a60f13e95ee1767b5fe87cbf378458ac7e27b4833f96----- +NGJjZmIzODdkYmJkN2Q3NzIxOWVmOWFkZGI3OTMyZmRlYzcwNjZiOTk3MmVkNjg3 +ZjkyYmIyMzg5MGFhOTMzMQ== +-----END PRIVATE KEY for 283ccdc58e0df19717ecd0c4c3a553059bf6c8d91b9c7b624afa8cb0564c7fd86e5a199973d17b7b939e63186b25f20a7234ad7f162c8f2547ba2e326c30a6c0571ea04cba83b35fd9a2a60f13e95ee1767b5fe87cbf378458ac7e27b4833f96----- +-----BEGIN PRIVATE KEY for 
7d1a1b4f36fcd8cea426005212022511bc25a414019e2b5c65947f00c28c8f1220ff1473c36efaa22a94d3e2b5258705ff6efb91902afb2c951c90502edf60072c3330ad9fd1b5c4b85226f10a474a0ddda9b61730946629b110b6eac70de70a----- +ZDE1ZDk1YzdhMGU1ZGY5MDRmNzQxODI2NDFiN2FlOGEwYmJkYzE5Y2RkOGNhMGZh +MzEyNDI3OTY2YjNkODE1YQ== +-----END PRIVATE KEY for 7d1a1b4f36fcd8cea426005212022511bc25a414019e2b5c65947f00c28c8f1220ff1473c36efaa22a94d3e2b5258705ff6efb91902afb2c951c90502edf60072c3330ad9fd1b5c4b85226f10a474a0ddda9b61730946629b110b6eac70de70a----- diff --git a/cmd/node/config/testKeys/group2/allValidatorsKeys.pem b/cmd/node/config/testKeys/group2/allValidatorsKeys.pem new file mode 100644 index 00000000000..cbd478d5b5b --- /dev/null +++ b/cmd/node/config/testKeys/group2/allValidatorsKeys.pem @@ -0,0 +1,60 @@ +-----BEGIN PRIVATE KEY for eedbc5a3141b92148205d178d150905e68ca745ba54617025f84b33c91233afda2c2109084821e14f9a50d3f220fbc000ce6ad432f2a1865da9c6547016ecc7e07242ef490c0bdda29ec677f3e833f54eb2cf27e95b10b8edbdfa7de4e1bc000----- +YzU4OWY2MTQ1MjUyZjg4MmExYmIwY2QyNzVjOTQ5MzZlMjMxYTk0ZTZhYmNjM2Q1 +ZGY3OTA2Mzc0M2NhZmMwYw== +-----END PRIVATE KEY for eedbc5a3141b92148205d178d150905e68ca745ba54617025f84b33c91233afda2c2109084821e14f9a50d3f220fbc000ce6ad432f2a1865da9c6547016ecc7e07242ef490c0bdda29ec677f3e833f54eb2cf27e95b10b8edbdfa7de4e1bc000----- +-----BEGIN PRIVATE KEY for 48c77bbf5d619fb13d76883736c664493b323c00f89131c9d70b3e4ac875103bd8248e790e47a82c9fdcd46fe33b52093a4b3b248ce20e6f958acd22dfb17335fcaf752bab5e29934f0a7e0af54fb2f51a9e6b1be30abdd701f7c9fbd0ad5d8e----- +ZjJmZjg3MTNmMzdjZmYxYTljZTM5MTA4ZjA3OGFkOTc2OGViYzg2MDY0NTEyYjg2 +OTFhYTk0MmE3ODQzODQ1Mw== +-----END PRIVATE KEY for 48c77bbf5d619fb13d76883736c664493b323c00f89131c9d70b3e4ac875103bd8248e790e47a82c9fdcd46fe33b52093a4b3b248ce20e6f958acd22dfb17335fcaf752bab5e29934f0a7e0af54fb2f51a9e6b1be30abdd701f7c9fbd0ad5d8e----- +-----BEGIN PRIVATE KEY for dd59e326a160d3b49ca25f2ab93b4f7ba766b124de66b68507b9e7a9cf69df7c4fca695592eb31e7e63061daef52d30cc1d362fc612d22631398cad4af46969e35407b293808133fc130b8f930ba41c6b88bc9ed9b884892113593d3ffc55297----- +NThiOGMyNWVmMThmNTJhM2NhYTRiMjEwMWRhMTdhN2YwMTg1MWU2Y2RjZTRiZjM5 +ZTNmOGRjNzY0OThmMmU1OQ== +-----END PRIVATE KEY for dd59e326a160d3b49ca25f2ab93b4f7ba766b124de66b68507b9e7a9cf69df7c4fca695592eb31e7e63061daef52d30cc1d362fc612d22631398cad4af46969e35407b293808133fc130b8f930ba41c6b88bc9ed9b884892113593d3ffc55297----- +-----BEGIN PRIVATE KEY for 6b1a8bf3e5e7bacaaf99e3c89891a8a4ec7e9022986043f8206db9171ede3bd8cdcbbd7e8e1234180de5d651110ef706a8d964cb35048bc961611b55c8d9bd1b942b93c7e1b88157e7f79f2c08dbabe1af4612afe6044ab1be316976111b7019----- +NzJhNGVhN2E4ZmExZjQ3ZGUxY2ZjNzQxZGFjOGU5Zjc4ZDdiMWQyNWNlMDBkNTY1 +YWMyOGZkYzkxNDQ1NTYzNA== +-----END PRIVATE KEY for 6b1a8bf3e5e7bacaaf99e3c89891a8a4ec7e9022986043f8206db9171ede3bd8cdcbbd7e8e1234180de5d651110ef706a8d964cb35048bc961611b55c8d9bd1b942b93c7e1b88157e7f79f2c08dbabe1af4612afe6044ab1be316976111b7019----- +-----BEGIN PRIVATE KEY for d8f8ef204ac892dd1a04648fc449ffdd11418dbd5c8fe623e5efda0bcae47cb41eb99c981585d80be1d668d8b7466619b6ead4d83976cc4f6879627a455603a74ab2adbfb5fed0f1a2b954363d97cbd3ac7feb284c83ac64422fad518e589c8e----- +NDFiNDAxYjBkZDdmMDFhNDEwNmZjYmNjMDAwZDkwMWY5NWYwZTg4YjQ4ZjFmNzlh +MmY1ZmE5NWZjOTNjNWQxZA== +-----END PRIVATE KEY for d8f8ef204ac892dd1a04648fc449ffdd11418dbd5c8fe623e5efda0bcae47cb41eb99c981585d80be1d668d8b7466619b6ead4d83976cc4f6879627a455603a74ab2adbfb5fed0f1a2b954363d97cbd3ac7feb284c83ac64422fad518e589c8e----- +-----BEGIN PRIVATE KEY for 
f547e115b1ada7cf9b8aeef45ee0d9ec4b206315ef44be706d994a0571688cd96291d1ab6c3761df29d00a2ba290a3185e4796bc49891906f86e16da01af3fd52320944b96b60e679ac8e686d4819e97e15e5fe46503c556b4acdd8079624005----- +ZmFmMDA2YjRhYjNiZDhiZTg4ZTYwMWZjNDIyNjVlZjliMTQwZTRiNDNjYTNhYjVh +YzVlNGQ4NmUxOTkzNzY2Mw== +-----END PRIVATE KEY for f547e115b1ada7cf9b8aeef45ee0d9ec4b206315ef44be706d994a0571688cd96291d1ab6c3761df29d00a2ba290a3185e4796bc49891906f86e16da01af3fd52320944b96b60e679ac8e686d4819e97e15e5fe46503c556b4acdd8079624005----- +-----BEGIN PRIVATE KEY for 0e5cc5f218a8fa9bae1f6b452430c0de205e6b251d0f1d606d1ce28203fb556768e6c4545ce8e90d640ef2cc1062f40ccf2ede124b926cbf3b2b0050b0e19f67e7e36ac1a7049178a77cbd65ee30cd0a40d9f98846ce439cc120717501f03180----- +YzFiYzc1YjNjM2U0NWM4MjM5OTRjNWM0MTQzZDNhNWMzOWQ3YWY2ZmM2OTE0ODZi +NzdmZGU3ZTY1YjljZGIzNw== +-----END PRIVATE KEY for 0e5cc5f218a8fa9bae1f6b452430c0de205e6b251d0f1d606d1ce28203fb556768e6c4545ce8e90d640ef2cc1062f40ccf2ede124b926cbf3b2b0050b0e19f67e7e36ac1a7049178a77cbd65ee30cd0a40d9f98846ce439cc120717501f03180----- +-----BEGIN PRIVATE KEY for 760dc22525dce5be65a3a55ee07f7f012e0a89f435daec56eb475b0b5ca2d84b157894b8df64dfb570ecc633d5e1611639d43976e29f11c232236a9548b0145ee4e43fe495252c8c1f006b8df51d3835dee64a826f43167096b347b6919aa292----- +OGM2NjdjNTM2NWViNDZhMGExMDZmZDA1ZmZhYmUxNWU5NjA4NzU3ZWE0MDA4MzE5 +YmM4NmQ5MjY3YzNiMDIxMQ== +-----END PRIVATE KEY for 760dc22525dce5be65a3a55ee07f7f012e0a89f435daec56eb475b0b5ca2d84b157894b8df64dfb570ecc633d5e1611639d43976e29f11c232236a9548b0145ee4e43fe495252c8c1f006b8df51d3835dee64a826f43167096b347b6919aa292----- +-----BEGIN PRIVATE KEY for 39316473217f7c435a543efa078254198dd079e3c6505e7cc1564b033de8a161dc2e9c392b1e584440510113b5942816102d7be5f4af9461af21a454fc1938a962b256c1c1d1f939198029ed0bf22c62893038d5687787cb46436c0ef4f12417----- +NzdkYjE3MzMyOWY0MjIyYTMxOTFlZDUwMzM2MWZjZDQ2NTkwZjRhZmIxZjYwNWQx +MTMxYjNjOTg5MzRhNDc2MQ== +-----END PRIVATE KEY for 39316473217f7c435a543efa078254198dd079e3c6505e7cc1564b033de8a161dc2e9c392b1e584440510113b5942816102d7be5f4af9461af21a454fc1938a962b256c1c1d1f939198029ed0bf22c62893038d5687787cb46436c0ef4f12417----- +-----BEGIN PRIVATE KEY for 6a3b9f6b5fd38e79433daa9bf03543314f8a2a3d9f1fec8ebe2bc1ee97f135d83845dcecd201207c1b31d7624ddb330ae67fbfab4137cd734d96bc0975ae8bcfeecc4441b384d39d6900cdb7436450c23b4cc7674ec50055ea4a90861c503a91----- +MjkwNThjZmJmYzAxM2I2YjJlYzgzMTA5MWY0MWIzNzVkNDUzMTRiZTNmOTRiNjA3 +MDY1MzJmZWEwNzUyMDUzZA== +-----END PRIVATE KEY for 6a3b9f6b5fd38e79433daa9bf03543314f8a2a3d9f1fec8ebe2bc1ee97f135d83845dcecd201207c1b31d7624ddb330ae67fbfab4137cd734d96bc0975ae8bcfeecc4441b384d39d6900cdb7436450c23b4cc7674ec50055ea4a90861c503a91----- +-----BEGIN PRIVATE KEY for ee8c987b9af9bba2912763fb7fcd6d6575db60806c5041fa91816ecc339ccfd60bf3cf49fb7017158f0b8e6050276907620bc040816207f14a952bb86752816231ae7f31ff701862cfe0abca367fc4cd63bafd4ad6e4df67612e4ec71462650c----- +NTk1NjY3ZjUzMjg2MjUxYjc2MWNlNDIyOWNjMmNlYTBlOWVmNDg4MjJmNTk3MmU3 +NDZiZDM2ZGY2ZTY0OTM0Ng== +-----END PRIVATE KEY for ee8c987b9af9bba2912763fb7fcd6d6575db60806c5041fa91816ecc339ccfd60bf3cf49fb7017158f0b8e6050276907620bc040816207f14a952bb86752816231ae7f31ff701862cfe0abca367fc4cd63bafd4ad6e4df67612e4ec71462650c----- +-----BEGIN PRIVATE KEY for cfe96f6b010d08f211c83f4ae3eb451d1d5205a50bdcd451706044dc21f523d25f214ab89dd5aab7ae03111197d6e6156e70ab348c9b0fab0a7839ea57fef6cd2324882b4387014dba201e6f87d5ca395e14d900e4563494f4f11a69ef6cdf14----- +MTJjMzU0MzQ1ZDMzNTc2YTk4ZDQ0NjljZmY4Y2FlYWQ1ZDRmODgxODIwOGI0M2Vi +MmM2YzZiY2E4NjU3MWUxMQ== +-----END 
PRIVATE KEY for cfe96f6b010d08f211c83f4ae3eb451d1d5205a50bdcd451706044dc21f523d25f214ab89dd5aab7ae03111197d6e6156e70ab348c9b0fab0a7839ea57fef6cd2324882b4387014dba201e6f87d5ca395e14d900e4563494f4f11a69ef6cdf14----- +-----BEGIN PRIVATE KEY for 05e9e43732ecff55e553b35b5ee1416065818db162a6fbf096186a1230d88bd057cebb72c5afaec16a803c4c4f69770752fe29be73a4069d0d01666ede963271192d4f324f2b3dcaec8b2c871c23cf185579a039dd5ab093c7cd9bca53e09c85----- +MGMwM2JmYjcyMDI1OGU1NWVkNTU1NDk5ZjNiYWNlMDIxMjU4OTc3NDAwYzA5NGQ2 +YTg4NzViZWQ4NDA4MzIzYg== +-----END PRIVATE KEY for 05e9e43732ecff55e553b35b5ee1416065818db162a6fbf096186a1230d88bd057cebb72c5afaec16a803c4c4f69770752fe29be73a4069d0d01666ede963271192d4f324f2b3dcaec8b2c871c23cf185579a039dd5ab093c7cd9bca53e09c85----- +-----BEGIN PRIVATE KEY for 03e79df244aef557cd6e3b9c7e8063575b6cce83bbff005a0abf96000d0a93652ef0071decdd3ce052aab0912e4d510566af6273d91f41b0d8505a8ca69cff449dc979ff0c3c9c319feab9f2d3a965a49c1bf0d899e85fb3851951a798b0ab03----- +NjIyMWJmYzY2ZmYwYzg1OWY5MTYxZDNkZjY3NGJmMWQ4ZjkwZjExZThmN2MyMWU3 +NzM5NDVmYTIzYTQ2YjUzYw== +-----END PRIVATE KEY for 03e79df244aef557cd6e3b9c7e8063575b6cce83bbff005a0abf96000d0a93652ef0071decdd3ce052aab0912e4d510566af6273d91f41b0d8505a8ca69cff449dc979ff0c3c9c319feab9f2d3a965a49c1bf0d899e85fb3851951a798b0ab03----- +-----BEGIN PRIVATE KEY for 06d0a320440091cc0f44979753c036744cd762d2bf7aa3b61b425397b74c27b581ec39d250818068eef095cb0a49be0c586526bcc9bd98ec3120b9633efd390d4d9a7b6ffcafae8bdbbcf2c98f0cb2cd75897c4d6701b9d77861bf7ab8be3f88----- +OTdkYTJmM2FkOWI1NzRmZTg2N2U1Y2YzMmEwMWYwNjEzOGE2OGM0NjUwNWQzNTI4 +NmJlM2Y4OTQzMDQ3YmIwMg== +-----END PRIVATE KEY for 06d0a320440091cc0f44979753c036744cd762d2bf7aa3b61b425397b74c27b581ec39d250818068eef095cb0a49be0c586526bcc9bd98ec3120b9633efd390d4d9a7b6ffcafae8bdbbcf2c98f0cb2cd75897c4d6701b9d77861bf7ab8be3f88----- diff --git a/cmd/node/config/testKeys/group3/allValidatorsKeys.pem b/cmd/node/config/testKeys/group3/allValidatorsKeys.pem new file mode 100644 index 00000000000..3503b12fbf2 --- /dev/null +++ b/cmd/node/config/testKeys/group3/allValidatorsKeys.pem @@ -0,0 +1,64 @@ +-----BEGIN PRIVATE KEY for 82cfc47999d1499cb880d46e8280b8c4fe576dff20a8ca6f6ac551887c637f935153f9ce2f21921a532477535d42ac05f730760c78415756add2eab6d57d94916f3ad51590b23404739d152f89b6d052df48cace1793897cd4eba722247a6195----- +OWQyYTcwMWQxOGNlNzE4NjQzNDNhNDI5YWY4OGM1YTc3YTEzMjg3MjY1ZDFhMDEz +ZjZhYWFhZGI1NDU4YTM0NA== +-----END PRIVATE KEY for 82cfc47999d1499cb880d46e8280b8c4fe576dff20a8ca6f6ac551887c637f935153f9ce2f21921a532477535d42ac05f730760c78415756add2eab6d57d94916f3ad51590b23404739d152f89b6d052df48cace1793897cd4eba722247a6195----- +-----BEGIN PRIVATE KEY for 7675098e73574db9c59bdce61e4b80251c6536201715dca40b2b69c09ce097690f3a9095d22b006531e3b13b30894803bd7ede3e6d80c9064c431f8671db085ab1052354cb26a7a2436340b273b6c95c84ab94bb9531b99c5f883602b5284017----- +MWFjOWZkZDFlNWZhMmI5NzAxZTVjZWY4ZGFjMTUzMDgyMjE5MjE2YWFhMTU1NzM0 +NzdhMmNjZjhhN2Q4OTkzNg== +-----END PRIVATE KEY for 7675098e73574db9c59bdce61e4b80251c6536201715dca40b2b69c09ce097690f3a9095d22b006531e3b13b30894803bd7ede3e6d80c9064c431f8671db085ab1052354cb26a7a2436340b273b6c95c84ab94bb9531b99c5f883602b5284017----- +-----BEGIN PRIVATE KEY for c50f398a853c670ed625a12eddae175df5a90e034a54484a832566fc91f9b83d5daf1bc821cc347ba7e45f3acd4e1d00d0d7f52235824fd1326a7f370b58fc7dd98edfff4a41739a2015c6ed3a3c0bf3c986efeee187be70f1133fc4379dad95----- +MTE5NWQzZjk0OTk1MDNhMDBjMzhmOWY2NzQwNDZmMzQ4MGZiODk4YzZiZWNmOGVi +ODU5ZDU2MWUxOWY5MGY0YQ== +-----END PRIVATE KEY for 
c50f398a853c670ed625a12eddae175df5a90e034a54484a832566fc91f9b83d5daf1bc821cc347ba7e45f3acd4e1d00d0d7f52235824fd1326a7f370b58fc7dd98edfff4a41739a2015c6ed3a3c0bf3c986efeee187be70f1133fc4379dad95----- +-----BEGIN PRIVATE KEY for 4bd3f30f608b22b32100c6360def228ec95aa24e3048010bb64606392f602e180a0b2a12f7f92ef1d7f73ce1271ae30693bec692b15802c7ba079939640570fdc7f4d411c084ed0fe612ee223227ca3d02dc9732cf686ba8885007de53f8ec89----- +ZGU3YmUzZGU1NzdiNjk3OTY4ODJkYzljYjY2MzE5NTc2YzJlM2M4Y2Q4MDRlMjJm +YzMyMmZmYmVlM2Y3MGY1Mg== +-----END PRIVATE KEY for 4bd3f30f608b22b32100c6360def228ec95aa24e3048010bb64606392f602e180a0b2a12f7f92ef1d7f73ce1271ae30693bec692b15802c7ba079939640570fdc7f4d411c084ed0fe612ee223227ca3d02dc9732cf686ba8885007de53f8ec89----- +-----BEGIN PRIVATE KEY for 71e8458f92997a00c1cd0e638b9ec42ab136828fc13f0ec643b60af451270cc81d50f4c4578a7c93a700ee21e065281593e7995d2454356cbfdeadb9ffe7bf33ba8f7a31a1d2e76bba5a5f88a613ef37e35595838d0b7f4bd12da7d6fe743499----- +ZjJkOTY0ODVlZDk3YmQ1YWQ3M2M0OTk0NDg1ODIyMGNiMTY0ZDg1YTAwZWEzZTlm +YzYwMjY1ZGM3YjliMTMzNQ== +-----END PRIVATE KEY for 71e8458f92997a00c1cd0e638b9ec42ab136828fc13f0ec643b60af451270cc81d50f4c4578a7c93a700ee21e065281593e7995d2454356cbfdeadb9ffe7bf33ba8f7a31a1d2e76bba5a5f88a613ef37e35595838d0b7f4bd12da7d6fe743499----- +-----BEGIN PRIVATE KEY for d9c30948bffad18776b786f6367142b76605ac6e33a8d38c68c31c7afb099f1a83efb752a87afaf9d04a4a8fb656e40bfe2a4aa6e0c16b82d22bd6c232c2ce5e6672ac6232d2da6945bc033b04cbaaeb4b9af4b29585094e034ab8dcfb8b9c19----- +MmJjYzZkZmYzMDc5MjlmNjg1M2M5OTViZjA5ZWRiYjMxYWFhNjYwZDVjMTc1NTM3 +NzFjMmYwNGEwOWFkOWMxZg== +-----END PRIVATE KEY for d9c30948bffad18776b786f6367142b76605ac6e33a8d38c68c31c7afb099f1a83efb752a87afaf9d04a4a8fb656e40bfe2a4aa6e0c16b82d22bd6c232c2ce5e6672ac6232d2da6945bc033b04cbaaeb4b9af4b29585094e034ab8dcfb8b9c19----- +-----BEGIN PRIVATE KEY for 55fc7ab2e8c0a07bef2e1a9b35764cee1d604cb5634b7226a7310ce56a1f02e99d248fc5b416c4253ac7b88353b1a60f31e1104534e36cb00f46bdcb20a0d24f453e2c8d3cc48dc3c6086edbe16149aae14eb3a4d24ee2b217a4759bc0c0ea88----- +YmY3YjhmZjgxZmMzMzhjZWYwNzQ3ZWM1NzdlMzI3NTVkYTdjYThjMWVlN2QxYWNi +YzNkZDJhZDNhM2RkYzgzYg== +-----END PRIVATE KEY for 55fc7ab2e8c0a07bef2e1a9b35764cee1d604cb5634b7226a7310ce56a1f02e99d248fc5b416c4253ac7b88353b1a60f31e1104534e36cb00f46bdcb20a0d24f453e2c8d3cc48dc3c6086edbe16149aae14eb3a4d24ee2b217a4759bc0c0ea88----- +-----BEGIN PRIVATE KEY for a8e662e63ad0e87f2dc66cbed41d398b73a2da2aaced6cc466ed378b62daee28b3db8e8327a06278a094b05840965c17448ffc8a1c96e532a7960d1a15d2cabd16edadc476bfb4af3a825aff801f615d127b70b4745b88e01627a99ba52d5317----- +NWUyYWQyMGU5MzliMDUzMDU3Y2FkYjNkYTU0NmRkOWIyYjI3ODE1MWJkZDc1ODBl +MGFmYWEyZDM3YTZmNGY2Nw== +-----END PRIVATE KEY for a8e662e63ad0e87f2dc66cbed41d398b73a2da2aaced6cc466ed378b62daee28b3db8e8327a06278a094b05840965c17448ffc8a1c96e532a7960d1a15d2cabd16edadc476bfb4af3a825aff801f615d127b70b4745b88e01627a99ba52d5317----- +-----BEGIN PRIVATE KEY for f5e5eb9dd18aeb5d4829ab08795a9e4c8632a4fd248feed68382add1f2474d3cec042d51b897871bfee1f1c1fbeabf13d1c39d4f9b412948d27737f2b82e85474b7049a700ee8735373564791f0d20692dd1f8b494de7bab0a8415f01532ed90----- +NGNmNTQxMDMyYmNkNjQ3MWU0ZGNkN2NjYzZkNGY5ZDg4MTgwMThiMGIyOWE5NGZi +YTBlMTA2YmJlMTExMzMzMQ== +-----END PRIVATE KEY for f5e5eb9dd18aeb5d4829ab08795a9e4c8632a4fd248feed68382add1f2474d3cec042d51b897871bfee1f1c1fbeabf13d1c39d4f9b412948d27737f2b82e85474b7049a700ee8735373564791f0d20692dd1f8b494de7bab0a8415f01532ed90----- +-----BEGIN PRIVATE KEY for 
e66d7ac5e51a382164aeaae9924dda3272296a145d3c6178b962e3b7eb83e75515e665c327e86f3ef597ca840f8c5c0ace19ac9a8fbcdc573f9237d112fb1c467d646737863ccd1fe61f4c4341f9805f8e1fe98348b50c3c3f93f62de3975980----- +Mjc5N2ZjYjViYWMyOTJmOTZhMGI3NmYwNzhjZjVjMWJkMTkzYThjNmY1YWQ4NTdl +ZGU5MmU1MjVhMDE3NGIwNA== +-----END PRIVATE KEY for e66d7ac5e51a382164aeaae9924dda3272296a145d3c6178b962e3b7eb83e75515e665c327e86f3ef597ca840f8c5c0ace19ac9a8fbcdc573f9237d112fb1c467d646737863ccd1fe61f4c4341f9805f8e1fe98348b50c3c3f93f62de3975980----- +-----BEGIN PRIVATE KEY for 2a1c49643e564cdf28bba96dfd6cd8ad38a5958b2b3c9a8293ffb54e9df0a0188a67de2fb947b8ae3dd06b7411aaae0e8bedef795ad3b35ac9f1402dcd0631d9d530b01b3880362fbd3ed9a8488ecabfb1b46cac225c5d48c39be3e28503f90f----- +OTFjZTI1YzZiMjU2ZDZjNzE1MzIwMDUwYjIzZGU2YmI1NmNlYjc5Mzc0M2YyYTcz +MDRiOWUyN2ZjMjhkNmUxYQ== +-----END PRIVATE KEY for 2a1c49643e564cdf28bba96dfd6cd8ad38a5958b2b3c9a8293ffb54e9df0a0188a67de2fb947b8ae3dd06b7411aaae0e8bedef795ad3b35ac9f1402dcd0631d9d530b01b3880362fbd3ed9a8488ecabfb1b46cac225c5d48c39be3e28503f90f----- +-----BEGIN PRIVATE KEY for 5c9784523f360a802d4687c9c76bcef41a738d034aa8503a055c33898504b09670c1f637ca632e5290b3acf79a2191072f68c4192a9cbeb34f50c4a941e34247a64f642a6a074bec683bdfb83587cfdc0390ebd74505cb836cf04f3268e32f99----- +ZWMzOTQ2YTBlYmY2MjY5YTQwNWRkOTI2ODcxNjEzODVkMTUxYmEzZjRiOThlYTBj +YzUyMzc1OThiYmVkOGIzZA== +-----END PRIVATE KEY for 5c9784523f360a802d4687c9c76bcef41a738d034aa8503a055c33898504b09670c1f637ca632e5290b3acf79a2191072f68c4192a9cbeb34f50c4a941e34247a64f642a6a074bec683bdfb83587cfdc0390ebd74505cb836cf04f3268e32f99----- +-----BEGIN PRIVATE KEY for db7f7726c3e68abb28d070f529f2c222755d863aa9d7c0200fde10c93ccb8edcee8d45c9eb925bd5a0fa33c54d19270b7058f6e72256dad84214375f189310a73153cd84feef4b493ab61437b0cbcc2c592e6c093653a533631c8e0ab036c207----- +ZjFiODNjZTc2Y2Q1NGQzOWViNWFhNDNlMzdiNTBjMWJiNjY3YzVlNWQwNzg5YTg5 +ZWJlMWQ2NWE1ZmExZmQ1Nw== +-----END PRIVATE KEY for db7f7726c3e68abb28d070f529f2c222755d863aa9d7c0200fde10c93ccb8edcee8d45c9eb925bd5a0fa33c54d19270b7058f6e72256dad84214375f189310a73153cd84feef4b493ab61437b0cbcc2c592e6c093653a533631c8e0ab036c207----- +-----BEGIN PRIVATE KEY for a27b6f47c53263e5c8d69779a169d50605cdd7ddb4b5384f2d46e08ace6f787a60f6cf26256b62fafba9c91a87ff070bc99254fcb5a73239fc14f2108de62189005b51b21e2922b37c6cc657017832e3a59dfcc7a54ac5dcb997136da4e2748b----- +Mjk1YWExMDkzOWMyZWI2OGUyM2EzZWFmYzE1YjE2NmRjZDllMDIyZTUwYjU4MWE2 +ODcxN2NmN2E1ZDEyMmIxOA== +-----END PRIVATE KEY for a27b6f47c53263e5c8d69779a169d50605cdd7ddb4b5384f2d46e08ace6f787a60f6cf26256b62fafba9c91a87ff070bc99254fcb5a73239fc14f2108de62189005b51b21e2922b37c6cc657017832e3a59dfcc7a54ac5dcb997136da4e2748b----- +-----BEGIN PRIVATE KEY for 125af943ccf7405f204a34fe82d8b35f487d3c69c536311f999328ccaa7d1570626ea17c3fc4a75bba336746942e52025ebad7caf46e56ebd916178d89828ef3eb427c8e6c0cafe4adf91620e3ba23bb25cf751fc18f34775295765371c22b11----- +ZjU0OGUwZTZjODc0NzVjMTk2MjY5M2QzNzg2ZWIyZDMyYmViZDkxZmYwOWYxZThj +NGNhZWM3M2E5N2IwODk0OQ== +-----END PRIVATE KEY for 125af943ccf7405f204a34fe82d8b35f487d3c69c536311f999328ccaa7d1570626ea17c3fc4a75bba336746942e52025ebad7caf46e56ebd916178d89828ef3eb427c8e6c0cafe4adf91620e3ba23bb25cf751fc18f34775295765371c22b11----- +-----BEGIN PRIVATE KEY for a07ff46694c1faa139166fccf535ca18f1dada26d7ee4879334e70d0f4cd948deba46617ebeabc2f5c5e021a3b16e5099ee3eed5e03a259b3609fcac5256bb064ac0718c277018c6b2ab73f079ac81baca252afd031954af2883c8b2a4063909----- +OWM0NGIwY2U0OTliMDgwZjE1ZTBkYzdhMjg2MTY1ZThlMDU5MWU0Yjc3OTM0YzFl +NmQwNWJhOGQyMjk2NjA1MA== +-----END 
PRIVATE KEY for a07ff46694c1faa139166fccf535ca18f1dada26d7ee4879334e70d0f4cd948deba46617ebeabc2f5c5e021a3b16e5099ee3eed5e03a259b3609fcac5256bb064ac0718c277018c6b2ab73f079ac81baca252afd031954af2883c8b2a4063909----- diff --git a/cmd/node/config/testKeys/unStakedKeys.pem b/cmd/node/config/testKeys/unStakedKeys.pem new file mode 100644 index 00000000000..96a3bf2d715 --- /dev/null +++ b/cmd/node/config/testKeys/unStakedKeys.pem @@ -0,0 +1,24 @@ +-----BEGIN PRIVATE KEY for 283ccdc58e0df19717ecd0c4c3a553059bf6c8d91b9c7b624afa8cb0564c7fd86e5a199973d17b7b939e63186b25f20a7234ad7f162c8f2547ba2e326c30a6c0571ea04cba83b35fd9a2a60f13e95ee1767b5fe87cbf378458ac7e27b4833f96----- +NGJjZmIzODdkYmJkN2Q3NzIxOWVmOWFkZGI3OTMyZmRlYzcwNjZiOTk3MmVkNjg3 +ZjkyYmIyMzg5MGFhOTMzMQ== +-----END PRIVATE KEY for 283ccdc58e0df19717ecd0c4c3a553059bf6c8d91b9c7b624afa8cb0564c7fd86e5a199973d17b7b939e63186b25f20a7234ad7f162c8f2547ba2e326c30a6c0571ea04cba83b35fd9a2a60f13e95ee1767b5fe87cbf378458ac7e27b4833f96----- +-----BEGIN PRIVATE KEY for 7d1a1b4f36fcd8cea426005212022511bc25a414019e2b5c65947f00c28c8f1220ff1473c36efaa22a94d3e2b5258705ff6efb91902afb2c951c90502edf60072c3330ad9fd1b5c4b85226f10a474a0ddda9b61730946629b110b6eac70de70a----- +ZDE1ZDk1YzdhMGU1ZGY5MDRmNzQxODI2NDFiN2FlOGEwYmJkYzE5Y2RkOGNhMGZh +MzEyNDI3OTY2YjNkODE1YQ== +-----END PRIVATE KEY for 7d1a1b4f36fcd8cea426005212022511bc25a414019e2b5c65947f00c28c8f1220ff1473c36efaa22a94d3e2b5258705ff6efb91902afb2c951c90502edf60072c3330ad9fd1b5c4b85226f10a474a0ddda9b61730946629b110b6eac70de70a----- +-----BEGIN PRIVATE KEY for 03e79df244aef557cd6e3b9c7e8063575b6cce83bbff005a0abf96000d0a93652ef0071decdd3ce052aab0912e4d510566af6273d91f41b0d8505a8ca69cff449dc979ff0c3c9c319feab9f2d3a965a49c1bf0d899e85fb3851951a798b0ab03----- +NjIyMWJmYzY2ZmYwYzg1OWY5MTYxZDNkZjY3NGJmMWQ4ZjkwZjExZThmN2MyMWU3 +NzM5NDVmYTIzYTQ2YjUzYw== +-----END PRIVATE KEY for 03e79df244aef557cd6e3b9c7e8063575b6cce83bbff005a0abf96000d0a93652ef0071decdd3ce052aab0912e4d510566af6273d91f41b0d8505a8ca69cff449dc979ff0c3c9c319feab9f2d3a965a49c1bf0d899e85fb3851951a798b0ab03----- +-----BEGIN PRIVATE KEY for 06d0a320440091cc0f44979753c036744cd762d2bf7aa3b61b425397b74c27b581ec39d250818068eef095cb0a49be0c586526bcc9bd98ec3120b9633efd390d4d9a7b6ffcafae8bdbbcf2c98f0cb2cd75897c4d6701b9d77861bf7ab8be3f88----- +OTdkYTJmM2FkOWI1NzRmZTg2N2U1Y2YzMmEwMWYwNjEzOGE2OGM0NjUwNWQzNTI4 +NmJlM2Y4OTQzMDQ3YmIwMg== +-----END PRIVATE KEY for 06d0a320440091cc0f44979753c036744cd762d2bf7aa3b61b425397b74c27b581ec39d250818068eef095cb0a49be0c586526bcc9bd98ec3120b9633efd390d4d9a7b6ffcafae8bdbbcf2c98f0cb2cd75897c4d6701b9d77861bf7ab8be3f88----- +-----BEGIN PRIVATE KEY for 125af943ccf7405f204a34fe82d8b35f487d3c69c536311f999328ccaa7d1570626ea17c3fc4a75bba336746942e52025ebad7caf46e56ebd916178d89828ef3eb427c8e6c0cafe4adf91620e3ba23bb25cf751fc18f34775295765371c22b11----- +ZjU0OGUwZTZjODc0NzVjMTk2MjY5M2QzNzg2ZWIyZDMyYmViZDkxZmYwOWYxZThj +NGNhZWM3M2E5N2IwODk0OQ== +-----END PRIVATE KEY for 125af943ccf7405f204a34fe82d8b35f487d3c69c536311f999328ccaa7d1570626ea17c3fc4a75bba336746942e52025ebad7caf46e56ebd916178d89828ef3eb427c8e6c0cafe4adf91620e3ba23bb25cf751fc18f34775295765371c22b11----- +-----BEGIN PRIVATE KEY for a07ff46694c1faa139166fccf535ca18f1dada26d7ee4879334e70d0f4cd948deba46617ebeabc2f5c5e021a3b16e5099ee3eed5e03a259b3609fcac5256bb064ac0718c277018c6b2ab73f079ac81baca252afd031954af2883c8b2a4063909----- +OWM0NGIwY2U0OTliMDgwZjE1ZTBkYzdhMjg2MTY1ZThlMDU5MWU0Yjc3OTM0YzFl +NmQwNWJhOGQyMjk2NjA1MA== +-----END PRIVATE KEY for 
a07ff46694c1faa139166fccf535ca18f1dada26d7ee4879334e70d0f4cd948deba46617ebeabc2f5c5e021a3b16e5099ee3eed5e03a259b3609fcac5256bb064ac0718c277018c6b2ab73f079ac81baca252afd031954af2883c8b2a4063909----- diff --git a/cmd/node/config/testKeys/validatorKey.pem b/cmd/node/config/testKeys/validatorKey.pem new file mode 100644 index 00000000000..397c6629e6d --- /dev/null +++ b/cmd/node/config/testKeys/validatorKey.pem @@ -0,0 +1,96 @@ +-----BEGIN PRIVATE KEY for d3e0427c22ff9cc80ef4156f976644cfa25c54e5a69ed199132053f8cbbfddd4eb15a2f732a3c9b392169c8b1d060e0b5ab0d88b4dd7b4010fa051a17ef81bdbace5e68025965b00bf48e14a9ec8d8e2a8bcc9e62f97ddac3268f6b805f7b80e----- +MTMyZTliNDcyOTFmY2M2MmM2NGIzMzRmZDQzNGFiMmRiNzRiZjY0YjQyZDRjYzFi +NGNlZGQxMGRmNzdjMTkzNg== +-----END PRIVATE KEY for d3e0427c22ff9cc80ef4156f976644cfa25c54e5a69ed199132053f8cbbfddd4eb15a2f732a3c9b392169c8b1d060e0b5ab0d88b4dd7b4010fa051a17ef81bdbace5e68025965b00bf48e14a9ec8d8e2a8bcc9e62f97ddac3268f6b805f7b80e----- +-----BEGIN PRIVATE KEY for b0b6349b3f693e08c433970d10efb2fe943eac4057a945146bee5fd163687f4e1800d541aa0f11bf9e4cb6552f512e126068e68eb471d18fcc477ddfe0b9b3334f34e30d8b7b2c08f914f4ae54454f75fb28922ba9fd28785bcadc627031fa8a----- +NDkwYTU1YWI0MGNiZWE3Nzk4ZjdhNzQzYmNkM2RhNDQyNzZiZWM2YWQwODM3NTlh +NDUxNjY0NjE4NjI1NzQ2Ng== +-----END PRIVATE KEY for b0b6349b3f693e08c433970d10efb2fe943eac4057a945146bee5fd163687f4e1800d541aa0f11bf9e4cb6552f512e126068e68eb471d18fcc477ddfe0b9b3334f34e30d8b7b2c08f914f4ae54454f75fb28922ba9fd28785bcadc627031fa8a----- +-----BEGIN PRIVATE KEY for 67c301358a41bef74df2ae6aa9914e3a5e7a4b528bbd19596cca4b2fd97a62ab2c0a88b02adf1c5973a82c7544cdc40539ae62a9ac05351cfc59c300bbf4492f4266c550987355c39cff8e84ff74e012c7fd372c240eeb916ef87eead82ffd98----- +NTkwNzQzOTJmNGY5NzBjM2I1ZDRiYTE3ODM5NTVmY2Y5ZmNjNDRkOWE1YWZmMmI1 +Y2RkYjAwMjBjYTE1NWI1Yw== +-----END PRIVATE KEY for 67c301358a41bef74df2ae6aa9914e3a5e7a4b528bbd19596cca4b2fd97a62ab2c0a88b02adf1c5973a82c7544cdc40539ae62a9ac05351cfc59c300bbf4492f4266c550987355c39cff8e84ff74e012c7fd372c240eeb916ef87eead82ffd98----- +-----BEGIN PRIVATE KEY for ab0a22ba2be6560af8520208393381760f9d4f69fca4f152b0a3fe7b124dd7f932fd8c1fbb372792c235baafac36030ceaf6ebf215de4e8d8d239f347f2fed10a75a07cbf9dc56efbbfca2e319152a363df122c300cdeb2faa02a61ebefd8a0e----- +YTYwOTFmYjUxNzY0NTE5NjM5NmQwNGFhYjM2NzllNGYwNTlkYjlkODVjOTgxNjI1 +YzE5OTlkYWRhOTg1Y2Q1ZQ== +-----END PRIVATE KEY for ab0a22ba2be6560af8520208393381760f9d4f69fca4f152b0a3fe7b124dd7f932fd8c1fbb372792c235baafac36030ceaf6ebf215de4e8d8d239f347f2fed10a75a07cbf9dc56efbbfca2e319152a363df122c300cdeb2faa02a61ebefd8a0e----- +-----BEGIN PRIVATE KEY for caa87d67e195b52355d2c8f7f74c829395b134bd4a911f158e04b2d7e66a5ba195265743f10cf190105512fb3df9d708a8056c07a6165874d8749742502c0eada7d15b6c55f22c2cce2cf5001288f6b2d89319e6ff888344c01adcd362be8998----- +NDM2NDEwYTEwMmVmZDFjOWJjNjA2ZmRmM2FlNWI3ZDlkZTM3NjVkZDkxYTg0YjA1 +OTY4NjJjNTg3OTcwZjU3MQ== +-----END PRIVATE KEY for caa87d67e195b52355d2c8f7f74c829395b134bd4a911f158e04b2d7e66a5ba195265743f10cf190105512fb3df9d708a8056c07a6165874d8749742502c0eada7d15b6c55f22c2cce2cf5001288f6b2d89319e6ff888344c01adcd362be8998----- +-----BEGIN PRIVATE KEY for 598be7548d6bb605bd19d83037bf58a7797a4e48b33011a60a5633cf6fe8d59906130777c46f50a50d3d0f958effb5147befd5d67cbec7c5daddeaade4dca5d8a54fe0394fde7b6455e4fc4db91f33f907d450b45fc2d4a9990f96d893093d91----- +MTRiMjkxYzY1MzA0NzE1NzY1ZTYzYjUzMTUzYzNmZmIyNzNlZTRlMWNjYzY1ZTc4 +MjdhMDNmYmViMWRjZmE2NQ== +-----END PRIVATE KEY for 
598be7548d6bb605bd19d83037bf58a7797a4e48b33011a60a5633cf6fe8d59906130777c46f50a50d3d0f958effb5147befd5d67cbec7c5daddeaade4dca5d8a54fe0394fde7b6455e4fc4db91f33f907d450b45fc2d4a9990f96d893093d91----- +-----BEGIN PRIVATE KEY for 69b277b127d025638dbb54d36baa8321540f6210fc5edaac77f94798c039a383aead3ae7c93cdfb8b4caab93a952d101ee2322c129b6ce2726359a65aa326bd35e54c974118503944fcaf80be80b5c3fc9cf86d574d0096140f16fbc55fc4984----- +Njc2ZDA3ZjBjNzQ5MWM4ZTYxOTg5NDdmN2Y1YThjMDcyMzAwZmM3NTlkYTkyOTQy +ODg5NjcyMDJhOTRiZWExNA== +-----END PRIVATE KEY for 69b277b127d025638dbb54d36baa8321540f6210fc5edaac77f94798c039a383aead3ae7c93cdfb8b4caab93a952d101ee2322c129b6ce2726359a65aa326bd35e54c974118503944fcaf80be80b5c3fc9cf86d574d0096140f16fbc55fc4984----- +-----BEGIN PRIVATE KEY for a006ad94b28c414c6ec0a5effb84594f39ede4f82b60aa077e2065b89407c78dd6479ebceed7bd42ed2779c34b718f11651427e550948cb8be2e6cea03a128ac3c52e599ada6f34912b119f94de472af0397a68769f1b3f647e87090918e030b----- +YzBkNjM4NjczODAxYWY4MWY5NWNkZjgxYzVkMWNiMTQwYWZjMmYwMjJkOTU3YTk0 +OGQ3ZTI4YTVjZjViMzE0Nw== +-----END PRIVATE KEY for a006ad94b28c414c6ec0a5effb84594f39ede4f82b60aa077e2065b89407c78dd6479ebceed7bd42ed2779c34b718f11651427e550948cb8be2e6cea03a128ac3c52e599ada6f34912b119f94de472af0397a68769f1b3f647e87090918e030b----- +-----BEGIN PRIVATE KEY for 91874fdfa8dfb85faf4f404b21c95fbb5d154db5a6abe46bd7860de9e5ddb78b61b5c6ddcf86e5ec8a237e130ed0fc0e418fb97d6fce5f6642ba33f99eff694ec7fb2921b423899a9a5888914bd625636a9b1ea186566561cd35b79aaca20e88----- +OTBhN2Y0YjlkNTVmMzliZmMzYmQ3Y2RiZWE2NWYyNmEzYThiNTk1ZjEyNzg5Yjlm +OGJmYzg5MDlhZTZjZmEzYQ== +-----END PRIVATE KEY for 91874fdfa8dfb85faf4f404b21c95fbb5d154db5a6abe46bd7860de9e5ddb78b61b5c6ddcf86e5ec8a237e130ed0fc0e418fb97d6fce5f6642ba33f99eff694ec7fb2921b423899a9a5888914bd625636a9b1ea186566561cd35b79aaca20e88----- +-----BEGIN PRIVATE KEY for cc3e0c1021f8c4c092499547b064cffef19d07f0bf250e5265cea1e49b282a7f6efb4b415ad37db2ef6efa253475f511e74efc2f76c087c9798f72187986bb752f61d0ac220045f8e2d945343f3bbb8ef34a6025fb855dd7d953a81477ad2309----- +OTc2NDdhMzYwODMyMTliZDhhYjI4NTYxYWQxZTRjOTZmNDdmNmUxOTM1NTVjNGY4 +MTc2ZDEwM2I4Y2Q0YjkzZA== +-----END PRIVATE KEY for cc3e0c1021f8c4c092499547b064cffef19d07f0bf250e5265cea1e49b282a7f6efb4b415ad37db2ef6efa253475f511e74efc2f76c087c9798f72187986bb752f61d0ac220045f8e2d945343f3bbb8ef34a6025fb855dd7d953a81477ad2309----- +-----BEGIN PRIVATE KEY for c2885340a6ba4341d68f80ce419deadf374bc52e2749c278b5bce5f795e9a90a04ef4f07a0b47777feb1982749b57a174b4927338df9da99a417a2df3152a9ebaf3465bfc092058324edf6892313f24be4612eb5663bb59d67a831dda135aa8b----- +MWQxOGIyMGFiZWUyNDFjOWU0ODEwZDQxMjI2ZGU4NDk3Y2FhYzk3OTczYmVhYzBk +YzUyYjI2ODg3M2FlMjM2NA== +-----END PRIVATE KEY for c2885340a6ba4341d68f80ce419deadf374bc52e2749c278b5bce5f795e9a90a04ef4f07a0b47777feb1982749b57a174b4927338df9da99a417a2df3152a9ebaf3465bfc092058324edf6892313f24be4612eb5663bb59d67a831dda135aa8b----- +-----BEGIN PRIVATE KEY for cf8a2f97b7822acb16016a6debaaedea39959c9ac60b80e50f24734a0e0f6128ed1d216f5aed71866ca34bb30b6e8300e7995237744e766f6016ca28d4ebb2274326cb7af1a3c12d795cc127a4bf9aa9497d89ef0450c40f675afd1afa761012----- +ZWRkY2RmNzg3NGQ3Y2M2N2Q2Yjc1OTRlOTlkY2JjMWY0OTNiNGEzNjA4ZWM0NTdk +MjY0NDU1OTJiMmYwM2YwNA== +-----END PRIVATE KEY for cf8a2f97b7822acb16016a6debaaedea39959c9ac60b80e50f24734a0e0f6128ed1d216f5aed71866ca34bb30b6e8300e7995237744e766f6016ca28d4ebb2274326cb7af1a3c12d795cc127a4bf9aa9497d89ef0450c40f675afd1afa761012----- +-----BEGIN PRIVATE KEY for 
95a81b70474d59c1292bc5742db1a7b9bf03cb516ede6fb5cb3489ee812de8cccfc648f3ff3cda26106396a38c1c1f183b722392397a752d949c5123888b7a8ec012fe518f6efc25015a620b1559e4609286b52921e06b79fd563a9b3b4c4e16----- +MDUwNzJiZGQ3NGIyNzdkZTMzOTZhOGNlODk1ZGNmNzhhZWMzNGViYjJmNGI0ZmFi +MjI4MzVlNjhjNjUwNzMzZQ== +-----END PRIVATE KEY for 95a81b70474d59c1292bc5742db1a7b9bf03cb516ede6fb5cb3489ee812de8cccfc648f3ff3cda26106396a38c1c1f183b722392397a752d949c5123888b7a8ec012fe518f6efc25015a620b1559e4609286b52921e06b79fd563a9b3b4c4e16----- +-----BEGIN PRIVATE KEY for 5909def579148f456e8490659b859f80f8ccd62b5adda411e1acdc615c2ec795a88632cf2ec210a56ba91973fd3f07160f559f82f7afaafee008679fefb1b0cd2f26f4324197e6239c000accd1c427138568a8a9e276690c154d3df71a1f970c----- +OWMzYWU5MGNmOWJkOWIzZDUyOWE2YjBkZjMxOGU4MWU3MzRkNzA4MjdhMjZlYzc4 +YTcyZTBjYzhmYWQ4YzQ0Yg== +-----END PRIVATE KEY for 5909def579148f456e8490659b859f80f8ccd62b5adda411e1acdc615c2ec795a88632cf2ec210a56ba91973fd3f07160f559f82f7afaafee008679fefb1b0cd2f26f4324197e6239c000accd1c427138568a8a9e276690c154d3df71a1f970c----- +-----BEGIN PRIVATE KEY for 58d6cfe7e8c3ec675da17e492c4ba97759fa15fc0f41bbe29d1d49d5f5ca7db142450ada15e1e4bf4657614e26cceb04ed5c0ca17207b0e24c4baf5f91afc092d43a02aaeae76218420817c85292f8de7d3a2b4f3c8615c2bb6a6d1c74267788----- +N2YxOWM0MTU0NGIyMzAxYjA1NzBiM2E5MjhlODIyOTQyNTBlN2JmZjg4NTE3OTll +MTRhNTk3NDZkNmFhYzQ0ZA== +-----END PRIVATE KEY for 58d6cfe7e8c3ec675da17e492c4ba97759fa15fc0f41bbe29d1d49d5f5ca7db142450ada15e1e4bf4657614e26cceb04ed5c0ca17207b0e24c4baf5f91afc092d43a02aaeae76218420817c85292f8de7d3a2b4f3c8615c2bb6a6d1c74267788----- +-----BEGIN PRIVATE KEY for eb79770be0ae70e1d6932832eab94117b0c1a2442b3fdb380b1ad5a809b6221a4905e02a628886c925d152c4e5006413fe69d1f11cf543f4802d4ce4e5eac2b18b78a79215c737e2e098b40802044bc6e946b712299286c34f6d33d8b681790d----- +OWM1Njc4NjEyMWFiMmQ2MTdhYTIwM2QxMzU1N2QwNThmM2FhNDhhOTMyNWVhNzhh +N2NlODVhOTFjZGY4ODAwNA== +-----END PRIVATE KEY for eb79770be0ae70e1d6932832eab94117b0c1a2442b3fdb380b1ad5a809b6221a4905e02a628886c925d152c4e5006413fe69d1f11cf543f4802d4ce4e5eac2b18b78a79215c737e2e098b40802044bc6e946b712299286c34f6d33d8b681790d----- +-----BEGIN PRIVATE KEY for bc03265a52610464f2f0431a69647be3106924f5bf67cf87cd889bf86d81739b3f0f37bad11ab93c5209dc4496f4130d69a9649596b97884b7e91e0b4d7c59169dd0729ac3e3bcd308efac56bc29d3cc249d8759580ab117943aa40df3baac05----- +ZmEyMmRkODcyMzExMzgzZmRlNmE3ZWFmYTk1ZGZhNWRhMWNmNTJjYTE3NTc1NTdi +Yzk5MjAyNDE2YzFkY2IwNw== +-----END PRIVATE KEY for bc03265a52610464f2f0431a69647be3106924f5bf67cf87cd889bf86d81739b3f0f37bad11ab93c5209dc4496f4130d69a9649596b97884b7e91e0b4d7c59169dd0729ac3e3bcd308efac56bc29d3cc249d8759580ab117943aa40df3baac05----- +-----BEGIN PRIVATE KEY for aa4be8f36c2880ee4d2ca79dbd7a53537e3965f255dfb5c75324fe29fcb6ce56148fbaea334268e413f0df95f580c40fb3484165b2852236e3a1aa68151ac3327d981cfae52d99f9a564bd3139cdd768661854dae78880d9320191cdb2989815----- +MmRmYmFkMzMyNGMyZWEwNzZlZDQyYWY1NjFkZDRiZDdmMTU4ZGRiODQxZTUzMzYy +ODI5YmZlOWI5YzljYmUzMg== +-----END PRIVATE KEY for aa4be8f36c2880ee4d2ca79dbd7a53537e3965f255dfb5c75324fe29fcb6ce56148fbaea334268e413f0df95f580c40fb3484165b2852236e3a1aa68151ac3327d981cfae52d99f9a564bd3139cdd768661854dae78880d9320191cdb2989815----- +-----BEGIN PRIVATE KEY for 3e86fea8365791b3becfc9aa2bc239f6be58725e61e46e7935c56479ad285e0781da1f277980d2e1d0ecff3982f2d90f321aa03f3d934adf260628d0ed0dc81a98dfaf1e6278e042d6c78dc65f2fa79d3b457754a321b8a0d7bf9998feeea817----- +NTM4ZmFkYjlkZjRkMzJjZDcxMzU5MmZhN2Q1MWI2NmNjODg1MGQ0NmZjZDQ2YTIz +N2RmN2ExN2ZhODE5MjAxNQ== +-----END 
PRIVATE KEY for 3e86fea8365791b3becfc9aa2bc239f6be58725e61e46e7935c56479ad285e0781da1f277980d2e1d0ecff3982f2d90f321aa03f3d934adf260628d0ed0dc81a98dfaf1e6278e042d6c78dc65f2fa79d3b457754a321b8a0d7bf9998feeea817----- +-----BEGIN PRIVATE KEY for aa92cf6e0ac62df09e7adca139c41a162ad668e7a797770b6d195cd9b175d0fca9eac3f4bf859967139f2ba109741a144e3dc5e6ccaeb6cd21f1d202b10f08832274cd9cdf6b10dbc2c60acdd1c70ae9beae2139e2b69eccbcde32a7f3991393----- +ZjQ0ZDNmZDcyZTVmYjJmYmFiMTVkYjdlMmNjYTYzYzBjM2VjYWE0NjkwMjg0MTcz +OTQxZDIzM2FjMWEzZDQxMA== +-----END PRIVATE KEY for aa92cf6e0ac62df09e7adca139c41a162ad668e7a797770b6d195cd9b175d0fca9eac3f4bf859967139f2ba109741a144e3dc5e6ccaeb6cd21f1d202b10f08832274cd9cdf6b10dbc2c60acdd1c70ae9beae2139e2b69eccbcde32a7f3991393----- +-----BEGIN PRIVATE KEY for f2b7819d1c2e2e1d007edcf896034085645f3c81e7c7fe21aa7ad4f35f8b863ee1db13448d15a3d0d15018f741a991010a9374710b628e41ef078be8a10249f2a3000598432c28186af1c04a219ac914434dca9c27e61485d701505112093f8a----- +NTNiOGVmY2EwYmY0NmIzNjI1MzUzOGM1YjU2YjIzYTg4MDgxYWUwOThmZjk0Y2Yx +YjI2OGIwYmYzOTQ4ZmIwZA== +-----END PRIVATE KEY for f2b7819d1c2e2e1d007edcf896034085645f3c81e7c7fe21aa7ad4f35f8b863ee1db13448d15a3d0d15018f741a991010a9374710b628e41ef078be8a10249f2a3000598432c28186af1c04a219ac914434dca9c27e61485d701505112093f8a----- +-----BEGIN PRIVATE KEY for 292742eee9d12dade21b4cd8bcd44c210c26d927ef6dbd9cad59008643a971a86ea6dfce247515d4266789b3fe8e35167278e781e52b4cd7b9781554ba67ecc08680eb19628e7741c94d8456090a08aceab1c8d2ed39bf59e8e282381aa32a0a----- +NjFjZmE3YmYyNTZhNTIzY2FjM2ZiY2I4NzQ5ZDVmZWNhNzc1OWU1YmZlMGM2OWY5 +YmRkNTU0MGU4MmMwYTQwOA== +-----END PRIVATE KEY for 292742eee9d12dade21b4cd8bcd44c210c26d927ef6dbd9cad59008643a971a86ea6dfce247515d4266789b3fe8e35167278e781e52b4cd7b9781554ba67ecc08680eb19628e7741c94d8456090a08aceab1c8d2ed39bf59e8e282381aa32a0a----- +-----BEGIN PRIVATE KEY for 11f784d2970d65769ce267710b3d08b28b78c3f79283758918c8ef15717ccbe90c23348cafe0e98a5d101b8dafbe7d081c6821dee8bf40ba150664ccc2dbbdd6358c92404e677d82910ce61f1d7584fbbbc9ebf71b7f35a118556e2a5c220501----- +MjU2ZGI2MmU3ZTBmMzkzMjlhYmM1YzE1NWM2NmE0YTdhNmRhOTY2MTVmMDgxOTMz +NTYwMzU0YjllNWQ3YjYyYw== +-----END PRIVATE KEY for 11f784d2970d65769ce267710b3d08b28b78c3f79283758918c8ef15717ccbe90c23348cafe0e98a5d101b8dafbe7d081c6821dee8bf40ba150664ccc2dbbdd6358c92404e677d82910ce61f1d7584fbbbc9ebf71b7f35a118556e2a5c220501----- +-----BEGIN PRIVATE KEY for 0382c11222db8a15e42e3ff64893df46c7720b439fb2a546462815ac0a8fa3bed99fceae5da9b68524e36f61cc074d09ceafec274c54f182c56a77583f9421f19c777265c43da1d5747304b36f0367cf3e8e5f63f41dad1a4362d9e1997a9e16----- +ZTUxOWQwNzcwZWRlZDhhNTFiMzIwN2M4MWRmMDhjMWZlMWZhMTQ1ZjFmYWQwNDU3 +YzI4NzRiNWQzYmY3Y2MwMw== +-----END PRIVATE KEY for 0382c11222db8a15e42e3ff64893df46c7720b439fb2a546462815ac0a8fa3bed99fceae5da9b68524e36f61cc074d09ceafec274c54f182c56a77583f9421f19c777265c43da1d5747304b36f0367cf3e8e5f63f41dad1a4362d9e1997a9e16----- diff --git a/cmd/node/config/testKeys/walletKeys.pem b/cmd/node/config/testKeys/walletKeys.pem new file mode 100644 index 00000000000..a0fe3cb02f0 --- /dev/null +++ b/cmd/node/config/testKeys/walletKeys.pem @@ -0,0 +1,175 @@ +-----BEGIN PRIVATE KEY for erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7----- +ODgxZTRhNGQ1ZDZmMjg5MmNlZGYxN2QwZDExMjlhMWNlZDk3NDFjYzhiZTc3Njc1 +M2EyNTdlYmM2YWMyYmYzMzI4NTYyNmRiYzI2NDIzODg0YTQ5M2YxZjU5NTJjNjE0 +ZTkyYzVhYWYyYzMyOTY5MGRhMzE3YTliNDkxNTc3Mjc= +-----END PRIVATE KEY for erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7----- +-----BEGIN PRIVATE KEY for 
erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk----- +MmIzNTVjMGRiYmY4MmVkNTljNDVmNzkzMDcwMTRhNmNiN2MzYmU5YzQzMDI1OWZl +ZjkwMzc4ODZmNTQ4ZjVlYzAwOGE4MGM0ZThhYWEyNzFjNWZlZjM4MTU1ODcwZjkx +YmEwN2E0ZmVjM2Q2YTlhYWUzODliNDljYTRmNDVjN2Y= +-----END PRIVATE KEY for erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk----- +-----BEGIN PRIVATE KEY for erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35----- +NDU4MmViYThmNTI5MDc2MDhmZThhNThhY2NhM2Y4NzgwM2Q2MjZlMGVjNjczZDRm +M2FkM2ZmNjQzZWIyZGJmODU4NTVkNGQ2NGM2ZGZjMWYxNzY0ZTUyZmE4MGQ3OGJk +ZWFhMGQzMzEwZTJlMDFlNjM5OTEwOTMyZWMxNzc3NjM= +-----END PRIVATE KEY for erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35----- +-----BEGIN PRIVATE KEY for erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84----- +NTlhYTVlOThlNTQ1MTVjYTEwMDFkNDU2ODAyNGRjNWNmMjI4MGE4ODFhMzNkOTQ3 +ZjFmMTQ1ZWZjZDY2YjEwNWNhOTJiOTU2ZjJhYzdmNjZmMWMxODE0Y2RkYWQxMjll +Zjg4YjVjYmI5YjQzN2FjZDU4MzI3NjlkNzEyYzlkNmQ= +-----END PRIVATE KEY for erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84----- +-----BEGIN PRIVATE KEY for erd1dzjes5c6a8ru45clgla3q0k3ezm06svefjz7vzs8pjfnrqa8tcasl4j8hs----- +OGZlOTExYjJmNjRhODRkYzI0MmMyZjNhZmIwNGJmY2QyZDRkOWM1ZDdiYzhmMGI0 +Mjc3NzVmZjU0NjkxYTFjOTY4YTU5ODUzMWFlOWM3Y2FkMzFmNDdmYjEwM2VkMWM4 +YjZmZDQxOTk0Yzg1ZTYwYTA3MGM5MzMxODNhNzVlM2I= +-----END PRIVATE KEY for erd1dzjes5c6a8ru45clgla3q0k3ezm06svefjz7vzs8pjfnrqa8tcasl4j8hs----- +-----BEGIN PRIVATE KEY for erd14gg3v6j4505ucx7t2wtl98tgupmyp748aq92jefmp5ha6e3pccgq9clwe9----- +ZDUwMzA4N2U4NWEyN2UyMDk0NDllMGIyZWFlN2M0Y2ViZmIwZTY0M2Q0MDg1NDZm +YzlkNTJmODJhOTBlMjg2MmFhMTExNjZhNTVhM2U5Y2MxYmNiNTM5N2YyOWQ2OGUw +NzY0MGZhYTdlODBhYTk2NTNiMGQyZmRkNjYyMWM2MTA= +-----END PRIVATE KEY for erd14gg3v6j4505ucx7t2wtl98tgupmyp748aq92jefmp5ha6e3pccgq9clwe9----- +-----BEGIN PRIVATE KEY for erd1xdfc44mk4ut5cv6l3mq0py6h88cty9ykacskm8xv3tvrp893kmxqppcefg----- +M2I0Y2M3NTQwNzA4ZGEwMWViOGMxNmY0MDFjMzAxZGFjNDI4Mzc5NjllNzU1MTJh +MjExZTBjMDBmMDI5YTRiODMzNTM4YWQ3NzZhZjE3NGMzMzVmOGVjMGYwOTM1NzM5 +ZjBiMjE0OTZlZTIxNmQ5Y2NjOGFkODMwOWNiMWI2Y2M= +-----END PRIVATE KEY for erd1xdfc44mk4ut5cv6l3mq0py6h88cty9ykacskm8xv3tvrp893kmxqppcefg----- +-----BEGIN PRIVATE KEY for erd1997jfwzrum4rrk59ar5supcyge9rpa73xgv2p45h3unt880v399svt8c9g----- +OTk0Yzg3YWFmOGMyYTI2ZmM5Yzc5YWJiODgwNDVmZGZhMWY5OTM0MjA5MTM3NDE0 +MWQwMWM1N2JiOGY5ODE0NjI5N2QyNGI4NDNlNmVhMzFkYTg1ZThlOTBlMDcwNDQ2 +NGEzMGY3ZDEzMjE4YTBkNjk3OGYyNmIzOWRlYzg5NGI= +-----END PRIVATE KEY for erd1997jfwzrum4rrk59ar5supcyge9rpa73xgv2p45h3unt880v399svt8c9g----- +-----BEGIN PRIVATE KEY for erd1e9cg9ys8fh77n9eaxpg47sxaes4fe9g2nvy6a65qpxykcx8grg9sv45lss----- +MjdlOTZjZDBjNGI0NTQxYjRkYzFjNjY4YjhmZDM0MWZhYWQ2MGM3M2NjNTM4YzM4 +M2QxZTBmYmRkN2I1NTk5N2M5NzA4MjkyMDc0ZGZkZTk5NzNkMzA1MTVmNDBkZGNj +MmE5Yzk1MGE5YjA5YWVlYTgwMDk4OTZjMThlODFhMGI= +-----END PRIVATE KEY for erd1e9cg9ys8fh77n9eaxpg47sxaes4fe9g2nvy6a65qpxykcx8grg9sv45lss----- +-----BEGIN PRIVATE KEY for erd1xdrltsygywhmtxzsmrgjlsxsxrf4y2ayv0z50y666dgsp66trxwqzajk96----- +Y2E2MzIxOGYzZGRjZjI1ZTIwZDM2MmQ3OWNjYWRiZDdhOTQ5ZWJjMjliYmE4YjZi +M2YyNDQyMWYwODgxNDJmMTMzNDdmNWMwODgyM2FmYjU5ODUwZDhkMTJmYzBkMDMw +ZDM1MjJiYTQ2M2M1NDc5MzVhZDM1MTAwZWI0YjE5OWM= +-----END PRIVATE KEY for erd1xdrltsygywhmtxzsmrgjlsxsxrf4y2ayv0z50y666dgsp66trxwqzajk96--- +-----BEGIN PRIVATE KEY for erd1lytewufjflpwl6gtf0faazjr59nd2fhfwlk7ew72hkpgdkmunl8qfrpywg----- +ZDFlNWMwZTA2NThlZmVmMjY3NWQ3YTBhYzUzZTY4MTJkYTdlMmNhNjhmNTRiMDdm +ZTRiMjYxYWFmZjM4Yzc2YmY5MTc5NzcxMzI0ZmMyZWZlOTBiNGJkM2RlOGE0M2Ex 
+NjZkNTI2ZTk3N2VkZWNiYmNhYmQ4Mjg2ZGI3YzlmY2U= +-----END PRIVATE KEY for erd1lytewufjflpwl6gtf0faazjr59nd2fhfwlk7ew72hkpgdkmunl8qfrpywg----- +-----BEGIN PRIVATE KEY for erd1s8tqztm4u4gw23489lps97qxe8vck8eln3a424y9c6yujsc96nas0l968d----- +OWZhYzA1YjhmOGEzNDEyYjkxMGQ0NjIyNzgwZjc4OGE1YmJiNThhNTlkODA3NmQz +YjFjMTNmZjM2MzdlZGYyYjgxZDYwMTJmNzVlNTUwZTU0NmE3MmZjMzAyZjgwNmM5 +ZDk4YjFmM2Y5YzdiNTU1NDg1YzY4OWM5NDMwNWQ0ZmI= +-----END PRIVATE KEY for erd1s8tqztm4u4gw23489lps97qxe8vck8eln3a424y9c6yujsc96nas0l968d----- +-----BEGIN PRIVATE KEY for erd1p7p0f3n8dxtj08hsp9hccqg932pd4f94rq3adg6g55etx8g4z8tsmg5e0g----- +NTI2NDc5M2JiMTgxZWY0YTAyNTIyYTUzNzUzYmYzODQ2M2FkODcwMmNlOWQwZWNl +MTQ1N2ExMDU0NmYyNzRmMTBmODJmNGM2Njc2OTk3Mjc5ZWYwMDk2ZjhjMDEwNThh +ODJkYWE0YjUxODIzZDZhMzQ4YTUzMmIzMWQxNTExZDc= +-----END PRIVATE KEY for erd1p7p0f3n8dxtj08hsp9hccqg932pd4f94rq3adg6g55etx8g4z8tsmg5e0g----- +-----BEGIN PRIVATE KEY for erd1uyeel03ea837dphrx2ak77hdvlhjdcqdwgyg6k99gqn602ymsn7qptmedj----- +ZTljNjFlM2QwMzQ3Y2QyMTc5MDI1YTM5NmVjNDYxZWU1NGU4ZGE0NzNjYzQyMTg1 +ZWUxNTFkOGM4ZjNkZDUzOGUxMzM5ZmJlMzllOWUzZTY4NmUzMzJiYjZmN2FlZDY3 +ZWYyNmUwMGQ3MjA4OGQ1OGE1NDAyN2E3YTg5Yjg0ZmM= +-----END PRIVATE KEY for erd1uyeel03ea837dphrx2ak77hdvlhjdcqdwgyg6k99gqn602ymsn7qptmedj----- +-----BEGIN PRIVATE KEY for erd1ftyzkdhl7rl782mrzrdc2jck3egydp0ydzhcjm9gc8s2jym5egrqadl4h6----- +YzI3YzY5MTgzMGUwYzJhNzlhZmVjYjI3N2UxMGRhOWZlNzZmYjUwZTJkMWQyNDc2 +YzZjNTgzNzVlMTgwZDc5NzRhYzgyYjM2ZmZmMGZmZTNhYjYzMTBkYjg1NGIxNjhl +NTA0Njg1ZTQ2OGFmODk2Y2E4YzFlMGE5MTM3NGNhMDY= +-----END PRIVATE KEY for erd1ftyzkdhl7rl782mrzrdc2jck3egydp0ydzhcjm9gc8s2jym5egrqadl4h6----- +-----BEGIN PRIVATE KEY for erd1rsl2sj5g87ltfq0hvrmgm35mlg4lzfs29p4gzxh0lh4vj2e8ykuqh69lha----- +NDg0MDgzZTIxMTk1ZGM2YjNjNmQwNTgwNWVmMGE2ZDhiYjdiMDYwMGZmMjFmMzIw +MGYwMzVhMTQwYjg2YTg2ODFjM2VhODRhODgzZmJlYjQ4MWY3NjBmNjhkYzY5YmZh +MmJmMTI2MGEyODZhODExYWVmZmRlYWM5MmIyNzI1Yjg= +-----END PRIVATE KEY for erd1rsl2sj5g87ltfq0hvrmgm35mlg4lzfs29p4gzxh0lh4vj2e8ykuqh69lha----- +-----BEGIN PRIVATE KEY for erd19yrjty2l4ytl6d3jynp5mqfekq4uf2x93akz60w7l3cp6qzny3psnfyerw----- +OGI3MDg3ZTk3NjQ3MmU0YzFiMDhmY2ZlNzQ5OGIwNDg5NTljYjZmYTlkMGExNjNl +YzFiMzk0M2NjMTk2N2Q4ZTI5MDcyNTkxNWZhOTE3ZmQzNjMyMjRjMzRkODEzOWIw +MmJjNGE4YzU4ZjZjMmQzZGRlZmM3MDFkMDA1MzI0NDM= +-----END PRIVATE KEY for erd19yrjty2l4ytl6d3jynp5mqfekq4uf2x93akz60w7l3cp6qzny3psnfyerw----- +-----BEGIN PRIVATE KEY for erd148lq42zdzz34y0yr8avldsy7gw0rmuvj4lmstzug77v08z3q0ncszfk8w9----- +ZjVlNTgzODEyZDIzNjgyNDlmMjczOTc1NGIwYWQ0NGY0ZWI0OTMyZDViZWJmMTM0 +ZjMyYzYzNDM0NDkyOTBhOGE5ZmUwYWE4NGQxMGEzNTIzYzgzM2Y1OWY2YzA5ZTQz +OWUzZGYxOTJhZmY3MDU4Yjg4Zjc5OGYzOGEyMDdjZjE= +-----END PRIVATE KEY for erd148lq42zdzz34y0yr8avldsy7gw0rmuvj4lmstzug77v08z3q0ncszfk8w9----- +-----BEGIN PRIVATE KEY for erd1k2v4h3805gnxf78c22g7lfe4pgq2lmr4ezmkk2rqkej6yjd7g5ssu88fme----- +YTEwMTM5NjQ0NjRlMzZhMDgyNTVkZTQyMTYyYmRhMjZiODVmNzEwOTgwZTAzM2M3 +ZGE0NjNjOTdlN2YyMzJkOGIyOTk1YmM0ZWZhMjI2NjRmOGY4NTI5MWVmYTczNTBh +MDBhZmVjNzVjOGI3NmIyODYwYjY2NWEyNDliZTQ1MjE= +-----END PRIVATE KEY for erd1k2v4h3805gnxf78c22g7lfe4pgq2lmr4ezmkk2rqkej6yjd7g5ssu88fme----- +-----BEGIN PRIVATE KEY for erd1nzjyj2ykpway04pczl42fgrlza2f0eaf97fxgnuuw39vyee36xlqccc3qz----- +Y2VlOGU0M2I4N2Q3YTBhM2E3ZmE3Y2ZiY2RhMTA0YjRhNGQ5YWUyMGNlZWZiODY5 +ODkyZmNiNWYxZTdjOGQzNjk4YTQ0OTI4OTYwYmJhNDdkNDM4MTdlYWE0YTA3ZjE3 +NTQ5N2U3YTkyZjkyNjQ0ZjljNzQ0YWMyNjczMWQxYmU= +-----END PRIVATE KEY for erd1nzjyj2ykpway04pczl42fgrlza2f0eaf97fxgnuuw39vyee36xlqccc3qz----- +-----BEGIN PRIVATE KEY for erd1yp0nvml5c45us3qzreqxkjxaakxn744t3gdva9s8xndcakzawutstepmm5----- 
+ZDA2NTdmMmU2ZTZmNjlkNTlkZjM0Mjc5NDhiODk5ODY3NDQ3ZmI4MDlhOTE3Yjcx +NjExZDg2ZGQ5ZjA4ZmMwMjIwNWYzNjZmZjRjNTY5Yzg0NDAyMWU0MDZiNDhkZGVk +OGQzZjU2YWI4YTFhY2U5NjA3MzRkYjhlZDg1ZDc3MTc= +-----END PRIVATE KEY for erd1yp0nvml5c45us3qzreqxkjxaakxn744t3gdva9s8xndcakzawutstepmm5----- +-----BEGIN PRIVATE KEY for erd1qyg80tr4rd65ur3hedm9h4yv3fcwmm6vnyrypnm972nd80889hxqdfgwrc----- +MTg4ZDlhNzE3NzAzNjYyMzY2YjE2NTIzYzI0MTliN2ExZjQ2OTk5Yzk5MmI5Mzcw +MDkxYTcxOGUwOTcxYjFkYjAxMTA3N2FjNzUxYjc1NGUwZTM3Y2I3NjViZDQ4Yzhh +NzBlZGVmNGM5OTA2NDBjZjY1ZjJhNmQzYmNlNzJkY2M= +-----END PRIVATE KEY for erd1qyg80tr4rd65ur3hedm9h4yv3fcwmm6vnyrypnm972nd80889hxqdfgwrc----- +-----BEGIN PRIVATE KEY for erd14x6d48q59zjh5p909fyw7e46czftgdawyf734cnmgk5e63ghrvvsqp254t----- +MzdmMDI3OGU4NGU3NjJlNzAzMzA3ZmY2MWQ4OGJlNjg5NDQ4MWVlNGNmZDI5NmQ1 +NjJmMjFkMWQ5MWE4OTFlOWE5YjRkYTljMTQyOGE1N2EwNGFmMmE0OGVmNjZiYWMw +OTJiNDM3YWUyMjdkMWFlMjdiNDVhOTlkNDUxNzFiMTk= +-----END PRIVATE KEY for erd14x6d48q59zjh5p909fyw7e46czftgdawyf734cnmgk5e63ghrvvsqp254t----- +-----BEGIN PRIVATE KEY for erd1wyxylus33e476h5kta7e0caeqvgvcgrxh0az33e7szya6g7mh2ws0n27sa----- +NTMwNjAxNzU5OThiYTIxNmRmN2EyN2E1Mjg3ZWMxODA4NjNiMTRkNjE5ZmFiY2U4 +ODhlMGU0MzIwNjFjMWM2MjcxMGM0ZmYyMTE4ZTZiZWQ1ZTk2NWY3ZDk3ZTNiOTAz +MTBjYzIwNjZiYmZhMjhjNzNlODA4OWRkMjNkYmJhOWQ= +-----END PRIVATE KEY for erd1wyxylus33e476h5kta7e0caeqvgvcgrxh0az33e7szya6g7mh2ws0n27sa----- +-----BEGIN PRIVATE KEY for erd1v3ylw7t6vzjzs06xjf6ccmf576ud38g2ws45tjkjg48s38jefpzqlwms9w----- +OWZhNzRmNTE2MTFiNDA5ZGU2YTIyZTI3NDQ5OTI0YmM2NDM4Y2E4ZWFjYzI0MTJj +Yzc0MjcwYjMzOGNlYTY5ZTY0NDlmNzc5N2E2MGE0MjgzZjQ2OTI3NThjNmQzNGY2 +YjhkODlkMGE3NDJiNDVjYWQyNDU0ZjA4OWU1OTQ4NDQ= +-----END PRIVATE KEY for erd1v3ylw7t6vzjzs06xjf6ccmf576ud38g2ws45tjkjg48s38jefpzqlwms9w----- +-----BEGIN PRIVATE KEY for erd1twel4azu6uptw878y063p93mjr84y5m4kpsww2aeqj4pg5jeplgst04rhg----- +MDNhOGM3OWQwM2M2MzljODUyZmFhNDlmZGFhODMyNjFhNGJjYjI4MDdmYWU1MGI1 +OTUyMzJjOGQwNTdiZWJkNDViYjNmYWY0NWNkNzAyYjcxZmM3MjNmNTEwOTYzYjkw +Y2Y1MjUzNzViMDYwZTcyYmI5MDRhYTE0NTI1OTBmZDE= +-----END PRIVATE KEY for erd1twel4azu6uptw878y063p93mjr84y5m4kpsww2aeqj4pg5jeplgst04rhg----- +-----BEGIN PRIVATE KEY for erd1q2se75ucl9as9j7e48v00jrnj6hvtk5vqxa4a3ag5729vctsdkasm20cyc----- +YjhiYjRhOTFmOTEyNjAwZWViYmI5N2MzYzBlOGQ1NTc3YzQ4OGE2M2IwZDhhZmY5 +ZjI2NjNhNzcyOWI5ZjMyZjAyYTE5ZjUzOThmOTdiMDJjYmQ5YTlkOGY3Yzg3Mzk2 +YWVjNWRhOGMwMWJiNWVjN2E4YTc5NDU2NjE3MDZkYmI= +-----END PRIVATE KEY for erd1q2se75ucl9as9j7e48v00jrnj6hvtk5vqxa4a3ag5729vctsdkasm20cyc----- +-----BEGIN PRIVATE KEY for erd18cc6cm35xhv7kzwsm79l4ma6jpz3ee5l0yjxuc66kh6rcgtawtuq6lzp9f----- +YWZjMGYzNmIwNWY3NGIwOGYyOWViMzMwZjkwZmU1ZTFmNmI4OWFlZDBkYzBjNjlk +OGY1NjJmMTk2MzA2ZWJiZDNlMzFhYzZlMzQzNWQ5ZWIwOWQwZGY4YmZhZWZiYTkw +NDUxY2U2OWY3OTI0NmU2MzVhYjVmNDNjMjE3ZDcyZjg= +-----END PRIVATE KEY for erd18cc6cm35xhv7kzwsm79l4ma6jpz3ee5l0yjxuc66kh6rcgtawtuq6lzp9f----- +-----BEGIN PRIVATE KEY for erd1psux99h4jljyt3nkw8pruv3spw5r0unqe4wk8837mm9my88gl28qj6mml5----- +YWMwMTM4NjU1MDVhMzM5MTEwZDJhOGI4N2E5ZDc3YWVlYjJiYmVjNjkwZjEzOWI3 +YjUwMDNkZTQzYzBjZDM2YzBjMzg2Mjk2ZjU5N2U0NDVjNjc2NzFjMjNlMzIzMDBi +YTgzN2YyNjBjZDVkNjM5ZTNlZGVjYmIyMWNlOGZhOGU= +-----END PRIVATE KEY for erd1psux99h4jljyt3nkw8pruv3spw5r0unqe4wk8837mm9my88gl28qj6mml5----- +-----BEGIN PRIVATE KEY for erd1vgm89ngmv2ghzsyq8xjtt45crekkxnhsq30yxzlq86uc3ra3r57qa3mw2p----- +N2E4YTViOGMzYjI3OWRmODMwYTkwNDI2YjI4MzU0NjE2MWJjOWIzN2NlYmE3Zjcy +NzFkYjk3YmIxZDM3YjUzZDYyMzY3MmNkMWI2MjkxNzE0MDgwMzlhNGI1ZDY5ODFl +NmQ2MzRlZjAwNDVlNDMwYmUwM2ViOTg4OGZiMTFkM2M= +-----END PRIVATE KEY for 
erd1vgm89ngmv2ghzsyq8xjtt45crekkxnhsq30yxzlq86uc3ra3r57qa3mw2p----- +-----BEGIN PRIVATE KEY for erd1k767vmmn8vg8xvuny32ppwr4dxrlgmpykn0u7nm92evlag3wkukqdgsf5u----- +ODkxZjVhZTdhOGE0ZTdiMDAxNzBmZWM1NGFhN2FjZDgzNDVlZGJlYjc4M2UwZDUw +ZTEwNGUyZmZlY2U2MTMwYWI3YjVlNjZmNzMzYjEwNzMzMzkzMjQ1NDEwYjg3NTY5 +ODdmNDZjMjRiNGRmY2Y0ZjY1NTY1OWZlYTIyZWI3MmM= +-----END PRIVATE KEY for erd1k767vmmn8vg8xvuny32ppwr4dxrlgmpykn0u7nm92evlag3wkukqdgsf5u----- +-----BEGIN PRIVATE KEY for erd1hwe8lskmzsdpuy3f6hldamvn0zrhzldec8m4tt8hupq58d7gyrequy8wsp----- +NjE0ZDc2YWVjOGE1MmI3NWU2MDI5ZWM4YjcyZWU1MTY1Mzg1OGQ2MzM4MmM1MmZl +MDc2MzI3ZWYxYTg1ZDk3ZGJiYjI3ZmMyZGIxNDFhMWUxMjI5ZDVmZWRlZWQ5Mzc4 +ODc3MTdkYjljMWY3NTVhY2Y3ZTA0MTQzYjdjODIwZjI= +-----END PRIVATE KEY for erd1hwe8lskmzsdpuy3f6hldamvn0zrhzldec8m4tt8hupq58d7gyrequy8wsp----- +-----BEGIN PRIVATE KEY for erd125eyrjk99zadr04gm9z2p4nckmnegexs5nyk7ek85rut2665t75sql3w88----- +ZWQ2YTFjNzAyMGMzMDc3ZGU5MGIzMTEyY2Y3NTAyYTgwNWM1MmQ0MDdhNWMyMDRj +NmYyNmNhNDNiNWEzYWU4OTU1MzI0MWNhYzUyOGJhZDFiZWE4ZDk0NGEwZDY3OGI2 +ZTc5NDY0ZDBhNGM5NmY2NmM3YTBmOGI1NmI1NDVmYTk= +-----END PRIVATE KEY for erd125eyrjk99zadr04gm9z2p4nckmnegexs5nyk7ek85rut2665t75sql3w88----- +-----BEGIN PRIVATE KEY for erd17ndrqg38lqf2zjgeqvle90rsn9ejrd9upx8evkyvh8e0m5xlph5scv9l6n----- +YzU3YjdlZGZkZWE3Nzk2MWI0N2Y1YmFkYmYzMTc0M2MwMmRmNjMzOGIyMWExYjFk +M2E5NWQyYWE2NmZkMjgzNWY0ZGEzMDIyMjdmODEyYTE0OTE5MDMzZjkyYmM3MDk5 +NzMyMWI0YmMwOThmOTY1ODhjYjlmMmZkZDBkZjBkZTk= +-----END PRIVATE KEY for erd17ndrqg38lqf2zjgeqvle90rsn9ejrd9upx8evkyvh8e0m5xlph5scv9l6n----- +-----BEGIN PRIVATE KEY for erd1zed89c8226rs7f59zh2xea39qk9ym9tsmt4s0sg2uw7u9nvtzt3q8fdj2e----- +ZTE5MGYwNDU0NjA0ZTI4ZjI5NzVlN2U5YTY1M2VhYjM2ZTdlOWRiZGEzYzQ2NjVk +MTk2MmMxMGMwZTU3Mjg3NzE2NWE3MmUwZWE1Njg3MGYyNjg1MTVkNDZjZjYyNTA1 +OGE0ZDk1NzBkYWViMDdjMTBhZTNiZGMyY2Q4YjEyZTI= +-----END PRIVATE KEY for erd1zed89c8226rs7f59zh2xea39qk9ym9tsmt4s0sg2uw7u9nvtzt3q8fdj2e----- diff --git a/cmd/node/flags.go b/cmd/node/flags.go index 7f610b8d130..72c86c04f96 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -633,7 +633,8 @@ func applyCompatibleConfigs(log logger.Logger, configs *config.Configs) error { isInHistoricalBalancesMode := operationmodes.SliceContainsElement(operationModes, operationmodes.OperationModeHistoricalBalances) if isInHistoricalBalancesMode { - processHistoricalBalancesMode(log, configs) + // TODO move all operation modes settings in the common/operationmodes package and add tests + operationmodes.ProcessHistoricalBalancesMode(log, configs) } isInDbLookupExtensionMode := operationmodes.SliceContainsElement(operationModes, operationmodes.OperationModeDbLookupExtension) @@ -649,28 +650,6 @@ func applyCompatibleConfigs(log logger.Logger, configs *config.Configs) error { return nil } -func processHistoricalBalancesMode(log logger.Logger, configs *config.Configs) { - configs.GeneralConfig.StoragePruning.Enabled = true - configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData = false - configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData = false - configs.GeneralConfig.GeneralSettings.StartInEpochEnabled = false - configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData = false - configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled = false - configs.GeneralConfig.DbLookupExtensions.Enabled = true - configs.PreferencesConfig.Preferences.FullArchive = true - - log.Warn("the node is in historical balances mode! 
Will auto-set some config values", - "StoragePruning.Enabled", configs.GeneralConfig.StoragePruning.Enabled, - "StoragePruning.ValidatorCleanOldEpochsData", configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData, - "StoragePruning.ObserverCleanOldEpochsData", configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData, - "StoragePruning.AccountsTrieCleanOldEpochsData", configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData, - "GeneralSettings.StartInEpochEnabled", configs.GeneralConfig.GeneralSettings.StartInEpochEnabled, - "StateTriesConfig.AccountsStatePruningEnabled", configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled, - "DbLookupExtensions.Enabled", configs.GeneralConfig.DbLookupExtensions.Enabled, - "Preferences.FullArchive", configs.PreferencesConfig.Preferences.FullArchive, - ) -} - func processDbLookupExtensionMode(log logger.Logger, configs *config.Configs) { configs.GeneralConfig.DbLookupExtensions.Enabled = true configs.GeneralConfig.StoragePruning.Enabled = true diff --git a/cmd/node/main.go b/cmd/node/main.go index 65fe1165a43..c7cc3c1085c 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -6,6 +6,7 @@ import ( "runtime" "time" + "github.com/klauspost/cpuid/v2" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/cmd/node/factory" @@ -46,10 +47,13 @@ VERSION: // appVersion should be populated at build time using ldflags // Usage examples: // linux/mac: -// go build -v -ldflags="-X main.appVersion=$(git describe --tags --long --dirty)" +// +// go build -v -ldflags="-X main.appVersion=$(git describe --tags --long --dirty)" +// // windows: -// for /f %i in ('git describe --tags --long --dirty') do set VERS=%i -// go build -v -ldflags="-X main.appVersion=%VERS%" +// +// for /f %i in ('git describe --tags --long --dirty') do set VERS=%i +// go build -v -ldflags="-X main.appVersion=%VERS%" var appVersion = common.UnVersionedAppString func main() { @@ -129,6 +133,11 @@ func startNodeRunner(c *cli.Context, log logger.Logger, baseVersion string, vers cfgs.FlagsConfig.BaseVersion = baseVersion cfgs.FlagsConfig.Version = version + err = checkHardwareRequirements(cfgs.GeneralConfig.HardwareRequirements) + if err != nil { + return fmt.Errorf("Hardware Requirements checks failed: %s", err.Error()) + } + nodeRunner, errRunner := node.NewNodeRunner(cfgs) if errRunner != nil { return errRunner @@ -301,3 +310,29 @@ func attachFileLogger(log logger.Logger, flagsConfig *config.ContextFlagsConfig) return fileLogging, nil } + +func checkHardwareRequirements(cfg config.HardwareRequirementsConfig) error { + cpuFlags, err := parseFeatures(cfg.CPUFlags) + if err != nil { + return err + } + + if !cpuid.CPU.Supports(cpuFlags...) 
{ + return fmt.Errorf("CPU Flags: Streaming SIMD Extensions 4 required") + } + + return nil +} + +func parseFeatures(features []string) ([]cpuid.FeatureID, error) { + flags := make([]cpuid.FeatureID, 0) + + for _, cpuFlag := range features { + featureID := cpuid.ParseFeature(cpuFlag) + if featureID == cpuid.UNKNOWN { + return nil, fmt.Errorf("CPU Flags: cpu flag %s not found", cpuFlag) + } + } + + return flags, nil +} diff --git a/cmd/seednode/config/p2p.toml b/cmd/seednode/config/p2p.toml index 2c1a92717c9..41db3a2196f 100644 --- a/cmd/seednode/config/p2p.toml +++ b/cmd/seednode/config/p2p.toml @@ -22,10 +22,11 @@ [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" # TCP listen address PreventPortReuse = true # seeder nodes will need to enable this option - [Node.ResourceLimiter] - Type = "default with manual scale" - ManualSystemMemoryInMB = 65536 # pretend that the host running the seeder has more RAM so it can handle more connections - ManualMaximumFD = 1048576 + + [Node.ResourceLimiter] + Type = "default with manual scale" + ManualSystemMemoryInMB = 65536 # pretend that the host running the seeder has more RAM so it can handle more connections + ManualMaximumFD = 1048576 # P2P peer discovery section @@ -47,9 +48,12 @@ #RefreshIntervalInSec represents the time in seconds between querying for new peers RefreshIntervalInSec = 10 - #ProtocolID represents the protocol that this node will advertize to other peers - #To connect to other nodes, those nodes should have the same ProtocolID string - ProtocolID = "/erd/kad/1.0.0" + # ProtocolIDs represents the protocols that this node will advertise to other peers + # To connect to other nodes, those nodes should have at least one common protocol string + ProtocolIDs = [ + "/erd/kad/1.0.0", + "mvx-main", + ] #InitialPeerList represents the list of strings of some known nodes that will bootstrap this node #The address will be in a self-describing addressing format. diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go index c881fb2a752..ee083fde21d 100644 --- a/cmd/seednode/main.go +++ b/cmd/seednode/main.go @@ -309,12 +309,21 @@ func displayMessengerInfo(messenger p2p.Messenger) { return strings.Compare(mesConnectedAddrs[i], mesConnectedAddrs[j]) < 0 }) - log.Info("known peers", "num peers", len(messenger.Peers())) - headerConnectedAddresses := []string{fmt.Sprintf("Seednode is connected to %d peers:", len(mesConnectedAddrs))} + protocolIDString := "Valid protocol ID?" + log.Info("peers info", "num known peers", len(messenger.Peers()), "num connected peers", len(mesConnectedAddrs)) + headerConnectedAddresses := []string{"Connected peers", protocolIDString} connAddresses := make([]*display.LineData, len(mesConnectedAddrs)) + yesMarker := "yes" + yesMarker = strings.Repeat(" ", (len(protocolIDString)-len(yesMarker))/2) + yesMarker // add padding + noMarker := "!!! no !!!" 
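// --- Illustrative aside (not part of the patch above) ---
// A minimal, self-contained sketch of the CPU-feature gate that cmd/node/main.go introduces
// via checkHardwareRequirements/parseFeatures. The feature names below ("SSE4", "SSE42") and
// the standalone main() are assumptions for illustration only; the node itself reads the real
// list from config.HardwareRequirementsConfig.CPUFlags.
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

// checkCPUFlags resolves each configured flag name to a cpuid.FeatureID and then asks the
// host CPU whether it supports the whole set. Note that every parsed ID is appended before
// calling Supports: with an empty slice, Supports would trivially report true.
func checkCPUFlags(required []string) error {
	features := make([]cpuid.FeatureID, 0, len(required))
	for _, name := range required {
		id := cpuid.ParseFeature(name)
		if id == cpuid.UNKNOWN {
			return fmt.Errorf("unknown cpu flag: %s", name)
		}
		features = append(features, id)
	}

	if !cpuid.CPU.Supports(features...) {
		return fmt.Errorf("host CPU does not support the required flags: %v", required)
	}

	return nil
}

func main() {
	// Example run with an assumed configuration; a node failing this check refuses to start.
	if err := checkCPUFlags([]string{"SSE4", "SSE42"}); err != nil {
		fmt.Println("hardware requirements check failed:", err)
		return
	}
	fmt.Println("hardware requirements satisfied")
}
// --- end of aside ---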
+ noMarker = strings.Repeat(" ", (len(protocolIDString)-len(noMarker))/2) + noMarker // add padding for idx, address := range mesConnectedAddrs { - connAddresses[idx] = display.NewLineData(false, []string{address}) + marker := noMarker + if messenger.HasCompatibleProtocolID(address) { + marker = yesMarker + } + connAddresses[idx] = display.NewLineData(false, []string{address, marker}) } tbl2, _ := display.CreateTableString(headerConnectedAddresses, connAddresses) diff --git a/cmd/termui/presenter/presenterStatusHandler.go b/cmd/termui/presenter/presenterStatusHandler.go index 6ad88f98e4d..1722eedbcb4 100644 --- a/cmd/termui/presenter/presenterStatusHandler.go +++ b/cmd/termui/presenter/presenterStatusHandler.go @@ -6,7 +6,7 @@ import ( "sync" ) -//maxLogLines is used to specify how many lines of logs need to store in slice +// maxLogLines is used to specify how many lines of logs need to store in slice var maxLogLines = 100 // PresenterStatusHandler is the AppStatusHandler impl that is able to process and store received data diff --git a/cmd/termui/view/termuic/interface.go b/cmd/termui/view/termuic/interface.go index ecc3e618da6..63384792e6b 100644 --- a/cmd/termui/view/termuic/interface.go +++ b/cmd/termui/view/termuic/interface.go @@ -1,6 +1,6 @@ package termuic -//TermuiRender defines the actions which should be handled by a render +// TermuiRender defines the actions which should be handled by a render type TermuiRender interface { // RefreshData method is used to refresh data that are displayed on a grid RefreshData(numMillisecondsRefreshTime int) diff --git a/cmd/termui/view/termuic/termuiRenders/drawableContainer.go b/cmd/termui/view/termuic/termuiRenders/drawableContainer.go index 4964c9d6a85..f21472b2185 100644 --- a/cmd/termui/view/termuic/termuiRenders/drawableContainer.go +++ b/cmd/termui/view/termuic/termuiRenders/drawableContainer.go @@ -17,7 +17,7 @@ type DrawableContainer struct { maxHeight int } -//NewDrawableContainer method is used to return a new NewDrawableContainer structure +// NewDrawableContainer method is used to return a new NewDrawableContainer structure func NewDrawableContainer() *DrawableContainer { dc := DrawableContainer{} return &dc diff --git a/common/constants.go b/common/constants.go index cacc0f99d7f..b39f99cbf62 100644 --- a/common/constants.go +++ b/common/constants.go @@ -43,6 +43,14 @@ const NewList PeerType = "new" // MetachainTopicIdentifier is the identifier used in topics to define the metachain shard ID const MetachainTopicIdentifier = "META" // TODO - move this to mx-chain-core-go and change wherever we use the string value +// AuctionList represents the list of peers which don't participate in consensus yet, but will be selected +// based on their top up stake +const AuctionList PeerType = "auction" + +// SelectedFromAuctionList represents the list of peers which have been selected from AuctionList based on +// their top up to be distributed on the WaitingList in the next epoch +const SelectedFromAuctionList PeerType = "selectedFromAuction" + // CombinedPeerType - represents the combination of two peerTypes const CombinedPeerType = "%s (%s)" @@ -92,6 +100,9 @@ const MetricCurrentRound = "erd_current_round" // MetricNonce is the metric for monitoring the nonce of a node const MetricNonce = "erd_nonce" +// MetricBlockTimestamp is the metric for monitoring the timestamp of the last synchronized block +const MetricBlockTimestamp = "erd_block_timestamp" + // MetricProbableHighestNonce is the metric for monitoring the max speculative nonce 
received by the node by listening on the network const MetricProbableHighestNonce = "erd_probable_highest_nonce" @@ -309,6 +320,9 @@ const MetricRedundancyLevel = "erd_redundancy_level" // MetricRedundancyIsMainActive is the metric that specifies data about the redundancy main machine const MetricRedundancyIsMainActive = "erd_redundancy_is_main_active" +// MetricRedundancyStepInReason is the metric that specifies why the back-up machine stepped in +const MetricRedundancyStepInReason = "erd_redundancy_step_in_reason" + // MetricValueNA represents the value to be used when a metric is not available/applicable const MetricValueNA = "N/A" @@ -481,6 +495,12 @@ const ( // MetricRelayedTransactionsV2EnableEpoch represents the epoch when the relayed transactions v2 is enabled MetricRelayedTransactionsV2EnableEpoch = "erd_relayed_transactions_v2_enable_epoch" + // MetricRelayedTransactionsV3EnableEpoch represents the epoch when the relayed transactions v3 is enabled + MetricRelayedTransactionsV3EnableEpoch = "erd_relayed_transactions_v3_enable_epoch" + + // MetricFixRelayedBaseCostEnableEpoch represents the epoch when the fix for relayed base cost is enabled + MetricFixRelayedBaseCostEnableEpoch = "erd_fix_relayed_base_cost_enable_epoch" + // MetricUnbondTokensV2EnableEpoch represents the epoch when the unbond tokens v2 is applied MetricUnbondTokensV2EnableEpoch = "erd_unbond_tokens_v2_enable_epoch" @@ -496,6 +516,9 @@ const ( // MetricIncrementSCRNonceInMultiTransferEnableEpoch represents the epoch when the fix for multi transfer SCR is enabled MetricIncrementSCRNonceInMultiTransferEnableEpoch = "erd_increment_scr_nonce_in_multi_transfer_enable_epoch" + // MetricScheduledMiniBlocksEnableEpoch represents the epoch when the scheduled miniblocks feature is enabled + MetricScheduledMiniBlocksEnableEpoch = "erd_scheduled_miniblocks_enable_epoch" + // MetricESDTMultiTransferEnableEpoch represents the epoch when the ESDT multi transfer feature is enabled MetricESDTMultiTransferEnableEpoch = "erd_esdt_multi_transfer_enable_epoch" @@ -505,11 +528,214 @@ const ( // MetricESDTTransferRoleEnableEpoch represents the epoch when the ESDT transfer role feature is enabled MetricESDTTransferRoleEnableEpoch = "erd_esdt_transfer_role_enable_epoch" - // MetricBuiltInFunctionOnMetaEnableEpoch represents the epoch when the builtin functions on metachain are enabled - MetricBuiltInFunctionOnMetaEnableEpoch = "erd_builtin_function_on_meta_enable_epoch" + // MetricComputeRewardCheckpointEnableEpoch represents the epoch when compute reward checkpoint feature is enabled + MetricComputeRewardCheckpointEnableEpoch = "erd_compute_reward_checkpoint_enable_epoch" + + // MetricSCRSizeInvariantCheckEnableEpoch represents the epoch when scr size invariant check is enabled + MetricSCRSizeInvariantCheckEnableEpoch = "erd_scr_size_invariant_check_enable_epoch" + + // MetricBackwardCompSaveKeyValueEnableEpoch represents the epoch when backward compatibility save key valu is enabled + MetricBackwardCompSaveKeyValueEnableEpoch = "erd_backward_comp_save_keyvalue_enable_epoch" + + // MetricESDTNFTCreateOnMultiShardEnableEpoch represents the epoch when esdt nft create on multi shard is enabled + MetricESDTNFTCreateOnMultiShardEnableEpoch = "erd_esdt_nft_create_on_multi_shard_enable_epoch" + + // MetricMetaESDTSetEnableEpoch represents the epoch when meta esdt set is enabled + MetricMetaESDTSetEnableEpoch = "erd_meta_esdt_set_enable_epoch" + + // MetricAddTokensToDelegationEnableEpoch represents the epoch when add tokens to delegation + 
MetricAddTokensToDelegationEnableEpoch = "erd_add_tokens_to_delegation_enable_epoch" + + // MetricMultiESDTTransferFixOnCallBackOnEnableEpoch represents the epoch when multi esdt transfer fix on callback on is enabled + MetricMultiESDTTransferFixOnCallBackOnEnableEpoch = "erd_multi_esdt_transfer_fix_on_callback_enable_epoch" + + // MetricOptimizeGasUsedInCrossMiniBlocksEnableEpoch represents the epoch when optimize gas used in cross miniblocks is enabled + MetricOptimizeGasUsedInCrossMiniBlocksEnableEpoch = "erd_optimize_gas_used_in_cross_miniblocks_enable_epoch" + // MetricCorrectFirstQueuedEpoch represents the epoch when correct first queued fix is enabled + MetricCorrectFirstQueuedEpoch = "erd_correct_first_queued_enable_epoch" + + // MetricCorrectJailedNotUnstakedEmptyQueueEpoch represents the epoch when correct jailed not unstacked meptry queue fix is enabled + MetricCorrectJailedNotUnstakedEmptyQueueEpoch = "erd_correct_jailed_not_unstaked_empty_queue_enable_epoch" + + // MetricFixOOGReturnCodeEnableEpoch represents the epoch when OOG return code fix is enabled + MetricFixOOGReturnCodeEnableEpoch = "erd_fix_oog_return_code_enable_epoch" + + // MetricRemoveNonUpdatedStorageEnableEpoch represents the epoch when remove non updated storage fix is enabled + MetricRemoveNonUpdatedStorageEnableEpoch = "erd_remove_non_updated_storage_enable_epoch" + + // MetricDeleteDelegatorAfterClaimRewardsEnableEpoch represents the epoch when delete delegator after claim rewards fix is enabled + MetricDeleteDelegatorAfterClaimRewardsEnableEpoch = "erd_delete_delegator_after_claim_rewards_enable_epoch" + + // MetricOptimizeNFTStoreEnableEpoch represents the epoch when optimize nft store feature is enabled + MetricOptimizeNFTStoreEnableEpoch = "erd_optimize_nft_store_enable_epoch" + + // MetricCreateNFTThroughExecByCallerEnableEpoch represents the epoch when create nft through exec by caller functionality is enabled + MetricCreateNFTThroughExecByCallerEnableEpoch = "erd_create_nft_through_exec_by_caller_enable_epoch" + + // MetricStopDecreasingValidatorRatingWhenStuckEnableEpoch represents the epoch when stop decreaing validator rating when stuck functionality is enabled + MetricStopDecreasingValidatorRatingWhenStuckEnableEpoch = "erd_stop_decreasing_validator_rating_when_stuck_enable_epoch" + + // MetricFrontRunningProtectionEnableEpoch represents the epoch when front running protection feature is enabled + MetricFrontRunningProtectionEnableEpoch = "erd_front_running_protection_enable_epoch" + + // MetricIsPayableBySCEnableEpoch represents the epoch when is payable by SC feature is enabled + MetricIsPayableBySCEnableEpoch = "erd_is_payable_by_sc_enable_epoch" + + // MetricCleanUpInformativeSCRsEnableEpoch represents the epoch when cleanup informative scrs functionality is enabled + MetricCleanUpInformativeSCRsEnableEpoch = "erd_cleanup_informative_scrs_enable_epoch" + + // MetricStorageAPICostOptimizationEnableEpoch represents the epoch when storage api cost optimization feature is enabled + MetricStorageAPICostOptimizationEnableEpoch = "erd_storage_api_cost_optimization_enable_epoch" + + // MetricTransformToMultiShardCreateEnableEpoch represents the epoch when transform to multi shard create functionality is enabled + MetricTransformToMultiShardCreateEnableEpoch = "erd_transform_to_multi_shard_create_enable_epoch" + + // MetricESDTRegisterAndSetAllRolesEnableEpoch represents the epoch when esdt register and set all roles functionality is enabled + MetricESDTRegisterAndSetAllRolesEnableEpoch = 
"erd_esdt_register_and_set_all_roles_enable_epoch" + + // MetricDoNotReturnOldBlockInBlockchainHookEnableEpoch represents the epoch when do not return old block in blockchain hook fix is enabled + MetricDoNotReturnOldBlockInBlockchainHookEnableEpoch = "erd_do_not_returns_old_block_in_blockchain_hook_enable_epoch" + + // MetricAddFailedRelayedTxToInvalidMBsDisableEpoch represents the epoch when add failed relayed tx to invalid miniblocks functionality is enabled + MetricAddFailedRelayedTxToInvalidMBsDisableEpoch = "erd_add_failed_relayed_tx_to_invalid_mbs_enable_epoch" + + // MetricSCRSizeInvariantOnBuiltInResultEnableEpoch represents the epoch when scr size invariant on builtin result functionality is enabled + MetricSCRSizeInvariantOnBuiltInResultEnableEpoch = "erd_scr_size_invariant_on_builtin_result_enable_epoch" + + // MetricCheckCorrectTokenIDForTransferRoleEnableEpoch represents the epoch when check correct tokenID for transfer role fix is enabled + MetricCheckCorrectTokenIDForTransferRoleEnableEpoch = "erd_check_correct_tokenid_for_transfer_role_enable_epoch" + + // MetricDisableExecByCallerEnableEpoch represents the epoch when disable exec by caller functionality is enabled + MetricDisableExecByCallerEnableEpoch = "erd_disable_exec_by_caller_enable_epoch" + + // MetricFailExecutionOnEveryAPIErrorEnableEpoch represents the epoch when fail execution on every api error functionality is enabled + MetricFailExecutionOnEveryAPIErrorEnableEpoch = "erd_fail_execution_on_every_api_error_enable_epoch" + + // MetricManagedCryptoAPIsEnableEpoch represents the epoch when managed cypto apis functionality is enabled + MetricManagedCryptoAPIsEnableEpoch = "erd_managed_crypto_apis_enable_epoch" + + // MetricRefactorContextEnableEpoch represents the epoch when refactor context functionality is enabled + MetricRefactorContextEnableEpoch = "erd_refactor_context_enable_epoch" + + // MetricCheckFunctionArgumentEnableEpoch represents the epoch when check function argument functionality is enabled + MetricCheckFunctionArgumentEnableEpoch = "erd_check_function_argument_enable_epoch" + + // MetricCheckExecuteOnReadOnlyEnableEpoch represents the epoch when check execute on read only fix is enabled + MetricCheckExecuteOnReadOnlyEnableEpoch = "erd_check_execute_on_readonly_enable_epoch" + + // MetricMiniBlockPartialExecutionEnableEpoch represents the epoch when miniblock partial execution feature is enabled + MetricMiniBlockPartialExecutionEnableEpoch = "erd_miniblock_partial_execution_enable_epoch" + + // MetricESDTMetadataContinuousCleanupEnableEpoch represents the epoch when esdt metadata continuous clenaup functionality is enabled + MetricESDTMetadataContinuousCleanupEnableEpoch = "erd_esdt_metadata_continuous_cleanup_enable_epoch" + + // MetricFixAsyncCallBackArgsListEnableEpoch represents the epoch when fix async callback args list is enabled + MetricFixAsyncCallBackArgsListEnableEpoch = "erd_fix_async_callback_args_list_enable_epoch" + + // MetricFixOldTokenLiquidityEnableEpoch represents the epoch when fix old token liquidity is enabled + MetricFixOldTokenLiquidityEnableEpoch = "erd_fix_old_token_liquidity_enable_epoch" + + // MetricRuntimeMemStoreLimitEnableEpoch represents the epoch when runtime mem store limit functionality is enabled + MetricRuntimeMemStoreLimitEnableEpoch = "erd_runtime_mem_store_limit_enable_epoch" + + // MetricRuntimeCodeSizeFixEnableEpoch represents the epoch when runtime code size fix is enabled + MetricRuntimeCodeSizeFixEnableEpoch = "erd_runtime_code_size_fix_enable_epoch" + 
+ // MetricSetSenderInEeiOutputTransferEnableEpoch represents the epoch when set sender in eei output transfer functionality is enabled + MetricSetSenderInEeiOutputTransferEnableEpoch = "erd_set_sender_in_eei_output_transfer_enable_epoch" + + // MetricRefactorPeersMiniBlocksEnableEpoch represents the epoch when refactor peers miniblock feature is enabled + MetricRefactorPeersMiniBlocksEnableEpoch = "erd_refactor_peers_miniblocks_enable_epoch" + + // MetricSCProcessorV2EnableEpoch represents the epoch when SC processor V2 feature is enabled + MetricSCProcessorV2EnableEpoch = "erd_sc_processorv2_enable_epoch" - // MetricWaitingListFixEnableEpoch represents the epoch when the waiting list fix is enabled - MetricWaitingListFixEnableEpoch = "erd_waiting_list_fix_enable_epoch" + // MetricMaxBlockchainHookCountersEnableEpoch represents the epoch when max blockchain hook counters functionality is enabled + MetricMaxBlockchainHookCountersEnableEpoch = "erd_max_blockchain_hook_counters_enable_epoch" + + // MetricWipeSingleNFTLiquidityDecreaseEnableEpoch represents the epoch when wipe single NFT liquidity decrease functionality is enabled + MetricWipeSingleNFTLiquidityDecreaseEnableEpoch = "erd_wipe_single_nft_liquidity_decrease_enable_epoch" + + // MetricAlwaysSaveTokenMetaDataEnableEpoch represents the epoch when always save token metadata functionality is enabled + MetricAlwaysSaveTokenMetaDataEnableEpoch = "erd_always_save_token_metadata_enable_epoch" + + // MetricSetGuardianEnableEpoch represents the epoch when the guardian feature is enabled + MetricSetGuardianEnableEpoch = "erd_set_guardian_feature_enable_epoch" + + // MetricSetScToScLogEventEnableEpoch represents the epoch when the sc to sc log event feature is enabled + MetricSetScToScLogEventEnableEpoch = "erd_set_sc_to_sc_log_event_enable_epoch" + + // MetricRelayedNonceFixEnableEpoch represents the epoch when relayed nonce fix is enabled + MetricRelayedNonceFixEnableEpoch = "erd_relayed_nonce_fix_enable_epoch" + + // MetricDeterministicSortOnValidatorsInfoEnableEpoch represents the epoch when deterministic sort on validators info functionality is enabled + MetricDeterministicSortOnValidatorsInfoEnableEpoch = "erd_deterministic_sort_on_validators_info_enable_epoch" + + // MetricKeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when keep exec order on created scs fix is enabled + MetricKeepExecOrderOnCreatedSCRsEnableEpoch = "erd_keep_exec_order_on_created_scrs_enable_epoch" + + // MetricMultiClaimOnDelegationEnableEpoch represents the epoch when multi claim on delegation functionality is enabled + MetricMultiClaimOnDelegationEnableEpoch = "erd_multi_claim_on_delegation_enable_epoch" + + // MetricChangeUsernameEnableEpoch represents the epoch when change username functionality is enabled + MetricChangeUsernameEnableEpoch = "erd_change_username_enable_epoch" + + // MetricAutoBalanceDataTriesEnableEpoch represents the epoch when auto balance data tries feature is enabled + MetricAutoBalanceDataTriesEnableEpoch = "erd_auto_balance_data_tries_enable_epoch" + + // MetricMigrateDataTrieEnableEpoch represents the epoch when migrate data trie feature is enabled + MetricMigrateDataTrieEnableEpoch = "erd_migrate_datatrie_enable_epoch" + + // MetricConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when consistent tokens values length check is enabled + MetricConsistentTokensValuesLengthCheckEnableEpoch = "erd_consistent_tokens_values_length_check_enable_epoch" + + // MetricFixDelegationChangeOwnerOnAccountEnableEpoch 
represents the epoch when fix delegation change owner on account is enabled + MetricFixDelegationChangeOwnerOnAccountEnableEpoch = "erd_fix_delegation_change_owner_on_account_enable_epoch" + + // MetricDynamicGasCostForDataTrieStorageLoadEnableEpoch represents the epoch when dynamic gas cost for data tries storage load functionality is enabled + MetricDynamicGasCostForDataTrieStorageLoadEnableEpoch = "erd_dynamic_gas_cost_for_datatrie_storage_load_enable_epoch" + + // MetricNFTStopCreateEnableEpoch represents the epoch when NFT stop create functionality is enabled + MetricNFTStopCreateEnableEpoch = "erd_nft_stop_create_enable_epoch" + + // MetricChangeOwnerAddressCrossShardThroughSCEnableEpoch represents the epoch when change owner address cross shard through SC functionality is enabled + MetricChangeOwnerAddressCrossShardThroughSCEnableEpoch = "erd_change_owner_address_cross_shard_through_sc_enable_epoch" + + // MetricFixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch represents the epoch when fix gas remaining for save key value builin function is enabled + MetricFixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = "erd_fix_gas_remainig_for_save_keyvalue_builtin_function_enable_epoch" + + // MetricCurrentRandomnessOnSortingEnableEpoch represents the epoch when current randomness on sorting functionality is enabled + MetricCurrentRandomnessOnSortingEnableEpoch = "erd_current_randomness_on_sorting_enable_epoch" + + // MetricStakeLimitsEnableEpoch represents the epoch when stake limits functionality is enabled + MetricStakeLimitsEnableEpoch = "erd_stake_limits_enable_epoch" + + // MetricStakingV4Step1EnableEpoch represents the epoch when staking v4 step 1 feature is enabled + MetricStakingV4Step1EnableEpoch = "erd_staking_v4_step1_enable_epoch" + + // MetricStakingV4Step2EnableEpoch represents the epoch when staking v4 step 2 feature is enabled + MetricStakingV4Step2EnableEpoch = "erd_staking_v4_step2_enable_epoch" + + // MetricStakingV4Step3EnableEpoch represents the epoch when staking v4 step 3 feature is enabled + MetricStakingV4Step3EnableEpoch = "erd_staking_v4_step3_enable_epoch" + + // MetricCleanupAuctionOnLowWaitingListEnableEpoch represents the epoch when cleanup auction on low waiting list fix is enabled + MetricCleanupAuctionOnLowWaitingListEnableEpoch = "erd_cleanup_auction_on_low_waiting_list_enable_epoch" + + // MetricAlwaysMergeContextsInEEIEnableEpoch represents the epoch when always merge contexts in EEI fix is enabled + MetricAlwaysMergeContextsInEEIEnableEpoch = "erd_always_merge_contexts_in_eei_enable_epoch" + + // MetricDynamicESDTEnableEpoch represents the epoch when dynamic ESDT feature is enabled + MetricDynamicESDTEnableEpoch = "erd_dynamic_esdt_enable_epoch" + + // MetricEGLDInMultiTransferEnableEpoch represents the epoch when EGLD in multi transfer feature is enabled + MetricEGLDInMultiTransferEnableEpoch = "erd_egld_in_multi_transfer_enable_epoch" + + // MetricCryptoOpcodesV2EnableEpoch represents the epoch when crypto opcodes v2 feature is enabled + MetricCryptoOpcodesV2EnableEpoch = "erd_crypto_opcodes_v2_enable_epoch" + + // MetricMultiESDTNFTTransferAndExecuteByUserEnableEpoch represents the epoch when enshrined sovereign opcodes are enabled + MetricMultiESDTNFTTransferAndExecuteByUserEnableEpoch = "erd_multi_esdt_transfer_execute_by_user_enable_epoch" // MetricMaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MetricMaxNodesChangeEnableEpoch = "erd_max_nodes_change_enable_epoch" @@ 
-531,12 +757,6 @@ const ( // NodesToShufflePerShardSuffix represents the suffix for NodesToShufflePerShard item in MaxNodesChangeEnableEpoch list NodesToShufflePerShardSuffix = "_nodes_to_shuffle_per_shard" - - // MetricHysteresis represents the hysteresis threshold - MetricHysteresis = "erd_hysteresis" - - // MetricAdaptivity represents a boolean to determine if adaptivity will be enabled or not - MetricAdaptivity = "erd_adaptivity" ) const ( @@ -615,11 +835,11 @@ const ( // MetricRatingsPeerHonestyUnitValue represents the peer honesty unit value MetricRatingsPeerHonestyUnitValue = "erd_ratings_peerhonesty_unit_value" - // MetricSetGuardianEnableEpoch represents the epoch when the guardian feature is enabled - MetricSetGuardianEnableEpoch = "erd_set_guardian_feature_enable_epoch" + // MetricHysteresis represents the hysteresis threshold + MetricHysteresis = "erd_hysteresis" - // MetricSetScToScLogEventEnableEpoch represents the epoch when the sc to sc log event feature is enabled - MetricSetScToScLogEventEnableEpoch = "erd_set_sc_to_sc_log_event_enable_epoch" + // MetricAdaptivity represents a boolean to determine if adaptivity will be enabled or not + MetricAdaptivity = "erd_adaptivity" ) const ( @@ -890,6 +1110,7 @@ const MetricTrieSyncNumProcessedNodes = "erd_trie_sync_num_nodes_processed" // FullArchiveMetricSuffix is the suffix added to metrics specific for full archive network const FullArchiveMetricSuffix = "_full_archive" +// Enable epoch flags definitions const ( SCDeployFlag core.EnableEpochFlag = "SCDeployFlag" BuiltInFunctionsFlag core.EnableEpochFlag = "BuiltInFunctionsFlag" @@ -930,7 +1151,6 @@ const ( ESDTMultiTransferFlag core.EnableEpochFlag = "ESDTMultiTransferFlag" GlobalMintBurnFlag core.EnableEpochFlag = "GlobalMintBurnFlag" ESDTTransferRoleFlag core.EnableEpochFlag = "ESDTTransferRoleFlag" - BuiltInFunctionOnMetaFlag core.EnableEpochFlag = "BuiltInFunctionOnMetaFlag" ComputeRewardCheckpointFlag core.EnableEpochFlag = "ComputeRewardCheckpointFlag" SCRSizeInvariantCheckFlag core.EnableEpochFlag = "SCRSizeInvariantCheckFlag" BackwardCompSaveKeyValueFlag core.EnableEpochFlag = "BackwardCompSaveKeyValueFlag" @@ -971,7 +1191,6 @@ const ( SendAlwaysFlag core.EnableEpochFlag = "SendAlwaysFlag" ValueLengthCheckFlag core.EnableEpochFlag = "ValueLengthCheckFlag" CheckTransferFlag core.EnableEpochFlag = "CheckTransferFlag" - TransferToMetaFlag core.EnableEpochFlag = "TransferToMetaFlag" ESDTNFTImprovementV1Flag core.EnableEpochFlag = "ESDTNFTImprovementV1Flag" ChangeDelegationOwnerFlag core.EnableEpochFlag = "ChangeDelegationOwnerFlag" RefactorPeersMiniBlocksFlag core.EnableEpochFlag = "RefactorPeersMiniBlocksFlag" @@ -990,6 +1209,7 @@ const ( MultiClaimOnDelegationFlag core.EnableEpochFlag = "MultiClaimOnDelegationFlag" ChangeUsernameFlag core.EnableEpochFlag = "ChangeUsernameFlag" AutoBalanceDataTriesFlag core.EnableEpochFlag = "AutoBalanceDataTriesFlag" + MigrateDataTrieFlag core.EnableEpochFlag = "MigrateDataTrieFlag" FixDelegationChangeOwnerOnAccountFlag core.EnableEpochFlag = "FixDelegationChangeOwnerOnAccountFlag" FixOOGReturnCodeFlag core.EnableEpochFlag = "FixOOGReturnCodeFlag" DeterministicSortOnValidatorsInfoFixFlag core.EnableEpochFlag = "DeterministicSortOnValidatorsInfoFixFlag" @@ -997,10 +1217,23 @@ const ( ScToScLogEventFlag core.EnableEpochFlag = "ScToScLogEventFlag" BlockGasAndFeesReCheckFlag core.EnableEpochFlag = "BlockGasAndFeesReCheckFlag" BalanceWaitingListsFlag core.EnableEpochFlag = "BalanceWaitingListsFlag" - WaitingListFixFlag core.EnableEpochFlag = 
"WaitingListFixFlag" NFTStopCreateFlag core.EnableEpochFlag = "NFTStopCreateFlag" FixGasRemainingForSaveKeyValueFlag core.EnableEpochFlag = "FixGasRemainingForSaveKeyValueFlag" IsChangeOwnerAddressCrossShardThroughSCFlag core.EnableEpochFlag = "IsChangeOwnerAddressCrossShardThroughSCFlag" CurrentRandomnessOnSortingFlag core.EnableEpochFlag = "CurrentRandomnessOnSortingFlag" + StakeLimitsFlag core.EnableEpochFlag = "StakeLimitsFlag" + StakingV4Step1Flag core.EnableEpochFlag = "StakingV4Step1Flag" + StakingV4Step2Flag core.EnableEpochFlag = "StakingV4Step2Flag" + StakingV4Step3Flag core.EnableEpochFlag = "StakingV4Step3Flag" + CleanupAuctionOnLowWaitingListFlag core.EnableEpochFlag = "CleanupAuctionOnLowWaitingListFlag" + StakingV4StartedFlag core.EnableEpochFlag = "StakingV4StartedFlag" + AlwaysMergeContextsInEEIFlag core.EnableEpochFlag = "AlwaysMergeContextsInEEIFlag" + DynamicESDTFlag core.EnableEpochFlag = "DynamicEsdtFlag" + EGLDInESDTMultiTransferFlag core.EnableEpochFlag = "EGLDInESDTMultiTransferFlag" + CryptoOpcodesV2Flag core.EnableEpochFlag = "CryptoOpcodesV2Flag" + UnJailCleanupFlag core.EnableEpochFlag = "UnJailCleanupFlag" + RelayedTransactionsV3Flag core.EnableEpochFlag = "RelayedTransactionsV3Flag" + FixRelayedBaseCostFlag core.EnableEpochFlag = "FixRelayedBaseCostFlag" + MultiESDTNFTTransferAndExecuteByUserFlag core.EnableEpochFlag = "MultiESDTNFTTransferAndExecuteByUserFlag" // all new flags must be added to createAllFlagsMap method, as part of enableEpochsHandler allFlagsDefined ) diff --git a/common/dtos.go b/common/dtos.go index e7876a9131b..50cf1109017 100644 --- a/common/dtos.go +++ b/common/dtos.go @@ -75,3 +75,19 @@ type EpochStartDataAPI struct { type AlteredAccountsForBlockAPIResponse struct { Accounts []*alteredAccount.AlteredAccount `json:"accounts"` } + +// AuctionNode holds data needed for a node in auction to respond to API calls +type AuctionNode struct { + BlsKey string `json:"blsKey"` + Qualified bool `json:"qualified"` +} + +// AuctionListValidatorAPIResponse holds the data needed for an auction node validator for responding to API calls +type AuctionListValidatorAPIResponse struct { + Owner string `json:"owner"` + NumStakedNodes int64 `json:"numStakedNodes"` + TotalTopUp string `json:"totalTopUp"` + TopUpPerNode string `json:"topUpPerNode"` + QualifiedTopUp string `json:"qualifiedTopUp"` + Nodes []*AuctionNode `json:"nodes"` +} diff --git a/common/enablers/enableEpochsHandler.go b/common/enablers/enableEpochsHandler.go index 7e19773707b..02f24d941d1 100644 --- a/common/enablers/enableEpochsHandler.go +++ b/common/enablers/enableEpochsHandler.go @@ -6,10 +6,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process" - logger "github.com/multiversx/mx-chain-logger-go" ) var log = logger.GetOrCreate("common/enablers") @@ -287,18 +288,6 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { }, activationEpoch: handler.enableEpochsConfig.ESDTTransferRoleEnableEpoch, }, - common.BuiltInFunctionOnMetaFlag: { - isActiveInEpoch: func(epoch uint32) bool { - return epoch >= handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch - }, - activationEpoch: 
handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch, - }, - common.TransferToMetaFlag: { - isActiveInEpoch: func(epoch uint32) bool { - return epoch >= handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch - }, - activationEpoch: handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch, - }, common.ComputeRewardCheckpointFlag: { isActiveInEpoch: func(epoch uint32) bool { return epoch >= handler.enableEpochsConfig.ComputeRewardCheckpointEnableEpoch @@ -641,6 +630,12 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { }, activationEpoch: handler.enableEpochsConfig.AutoBalanceDataTriesEnableEpoch, }, + common.MigrateDataTrieFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.MigrateDataTrieEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.MigrateDataTrieEnableEpoch, + }, common.FixDelegationChangeOwnerOnAccountFlag: { isActiveInEpoch: func(epoch uint32) bool { return epoch >= handler.enableEpochsConfig.FixDelegationChangeOwnerOnAccountEnableEpoch @@ -683,12 +678,6 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { }, activationEpoch: handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch, }, - common.WaitingListFixFlag: { - isActiveInEpoch: func(epoch uint32) bool { - return epoch >= handler.enableEpochsConfig.WaitingListFixEnableEpoch - }, - activationEpoch: handler.enableEpochsConfig.WaitingListFixEnableEpoch, - }, common.NFTStopCreateFlag: { isActiveInEpoch: func(epoch uint32) bool { return epoch >= handler.enableEpochsConfig.NFTStopCreateEnableEpoch @@ -713,6 +702,90 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { }, activationEpoch: handler.enableEpochsConfig.CurrentRandomnessOnSortingEnableEpoch, }, + common.StakeLimitsFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakeLimitsEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakeLimitsEnableEpoch, + }, + common.StakingV4Step1Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch == handler.enableEpochsConfig.StakingV4Step1EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step1EnableEpoch, + }, + common.StakingV4Step2Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV4Step2EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step2EnableEpoch, + }, + common.StakingV4Step3Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV4Step3EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step3EnableEpoch, + }, + common.CleanupAuctionOnLowWaitingListFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.CleanupAuctionOnLowWaitingListEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.CleanupAuctionOnLowWaitingListEnableEpoch, + }, + common.StakingV4StartedFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV4Step1EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step1EnableEpoch, + }, + common.AlwaysMergeContextsInEEIFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.AlwaysMergeContextsInEEIEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.AlwaysMergeContextsInEEIEnableEpoch, + }, + common.DynamicESDTFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= 
handler.enableEpochsConfig.DynamicESDTEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.DynamicESDTEnableEpoch, + }, + common.EGLDInESDTMultiTransferFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.EGLDInMultiTransferEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.EGLDInMultiTransferEnableEpoch, + }, + common.CryptoOpcodesV2Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.CryptoOpcodesV2EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.CryptoOpcodesV2EnableEpoch, + }, + common.UnJailCleanupFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.UnJailCleanupEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.UnJailCleanupEnableEpoch, + }, + common.RelayedTransactionsV3Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.RelayedTransactionsV3EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.RelayedTransactionsV3EnableEpoch, + }, + common.FixRelayedBaseCostFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.FixRelayedBaseCostEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.FixRelayedBaseCostEnableEpoch, + }, + common.MultiESDTNFTTransferAndExecuteByUserFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.MultiESDTNFTTransferAndExecuteByUserEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.MultiESDTNFTTransferAndExecuteByUserEnableEpoch, + }, } } @@ -781,6 +854,16 @@ func (handler *enableEpochsHandler) GetCurrentEpoch() uint32 { return currentEpoch } +// StakingV4Step2EnableEpoch returns the epoch when stakingV4 becomes active +func (handler *enableEpochsHandler) StakingV4Step2EnableEpoch() uint32 { + return handler.enableEpochsConfig.StakingV4Step2EnableEpoch +} + +// StakingV4Step1EnableEpoch returns the epoch when stakingV4 phase1 becomes active +func (handler *enableEpochsHandler) StakingV4Step1EnableEpoch() uint32 { + return handler.enableEpochsConfig.StakingV4Step1EnableEpoch +} + // IsInterfaceNil returns true if there is no value under the interface func (handler *enableEpochsHandler) IsInterfaceNil() bool { return handler == nil diff --git a/common/enablers/enableEpochsHandler_test.go b/common/enablers/enableEpochsHandler_test.go index 4b35120441b..290ace2d6e0 100644 --- a/common/enablers/enableEpochsHandler_test.go +++ b/common/enablers/enableEpochsHandler_test.go @@ -5,13 +5,14 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core/check" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func createEnableEpochsConfig() config.EnableEpochs { @@ -47,12 +48,10 @@ func createEnableEpochsConfig() config.EnableEpochs { SaveJailedAlwaysEnableEpoch: 27, ReDelegateBelowMinCheckEnableEpoch: 28, 
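// Illustrative sketch, not part of the diff: how the createAllFlagsMap entries added above drive
// flag checks. The detail worth noticing is that StakingV4Step1Flag is defined with '==' (active
// only during its activation epoch), while StakingV4StartedFlag uses '>=' from that same epoch
// onward. The tiny handler below is a simplified stand-in for enableEpochsHandler, and the epoch
// value 97 is simply the one used in the test config; both are assumptions for the example.
package main

import "fmt"

type flagHandler struct {
	isActiveInEpoch func(epoch uint32) bool
}

func main() {
	stakingV4Step1EnableEpoch := uint32(97)

	flags := map[string]flagHandler{
		"StakingV4Step1Flag": {
			// mirrors the '==' comparison from the diff: a one-epoch-only flag
			isActiveInEpoch: func(epoch uint32) bool { return epoch == stakingV4Step1EnableEpoch },
		},
		"StakingV4StartedFlag": {
			// mirrors the '>=' comparison: permanently active once the epoch is reached
			isActiveInEpoch: func(epoch uint32) bool { return epoch >= stakingV4Step1EnableEpoch },
		},
	}

	for _, epoch := range []uint32{96, 97, 98} {
		fmt.Printf(
			"epoch %d: StakingV4Step1Flag=%v StakingV4StartedFlag=%v\n",
			epoch,
			flags["StakingV4Step1Flag"].isActiveInEpoch(epoch),
			flags["StakingV4StartedFlag"].isActiveInEpoch(epoch),
		)
	}
	// Step1 is true only at epoch 97, while the "started" flag stays true for 97 and 98.
}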
ValidatorToDelegationEnableEpoch: 29, - WaitingListFixEnableEpoch: 30, IncrementSCRNonceInMultiTransferEnableEpoch: 31, ESDTMultiTransferEnableEpoch: 32, GlobalMintBurnDisableEpoch: 33, ESDTTransferRoleEnableEpoch: 34, - BuiltInFunctionOnMetaEnableEpoch: 35, ComputeRewardCheckpointEnableEpoch: 36, SCRSizeInvariantCheckEnableEpoch: 37, BackwardCompSaveKeyValueEnableEpoch: 38, @@ -113,6 +112,18 @@ func createEnableEpochsConfig() config.EnableEpochs { FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 93, ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 94, CurrentRandomnessOnSortingEnableEpoch: 95, + StakeLimitsEnableEpoch: 96, + StakingV4Step1EnableEpoch: 97, + StakingV4Step2EnableEpoch: 98, + StakingV4Step3EnableEpoch: 99, + AlwaysMergeContextsInEEIEnableEpoch: 100, + CleanupAuctionOnLowWaitingListEnableEpoch: 101, + DynamicESDTEnableEpoch: 102, + EGLDInMultiTransferEnableEpoch: 103, + CryptoOpcodesV2EnableEpoch: 104, + RelayedTransactionsV3EnableEpoch: 105, + FixRelayedBaseCostEnableEpoch: 106, + MultiESDTNFTTransferAndExecuteByUserEnableEpoch: 107, } } @@ -191,6 +202,13 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { handler.EpochConfirmed(cfg.SetGuardianEnableEpoch+1, 0) require.True(t, handler.IsFlagEnabled(common.SetGuardianFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch-1, 0) + require.False(t, handler.IsFlagEnabled(common.StakingV4StartedFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch, 0) + require.True(t, handler.IsFlagEnabled(common.StakingV4StartedFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch+1, 0) + require.True(t, handler.IsFlagEnabled(common.StakingV4StartedFlag)) + handler.EpochConfirmed(math.MaxUint32, 0) require.True(t, handler.IsFlagEnabled(common.SCDeployFlag)) require.True(t, handler.IsFlagEnabled(common.BuiltInFunctionsFlag)) @@ -231,7 +249,6 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { require.True(t, handler.IsFlagEnabled(common.ESDTMultiTransferFlag)) require.False(t, handler.IsFlagEnabled(common.GlobalMintBurnFlag)) // < require.True(t, handler.IsFlagEnabled(common.ESDTTransferRoleFlag)) - require.True(t, handler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag)) require.True(t, handler.IsFlagEnabled(common.ComputeRewardCheckpointFlag)) require.True(t, handler.IsFlagEnabled(common.SCRSizeInvariantCheckFlag)) require.False(t, handler.IsFlagEnabled(common.BackwardCompSaveKeyValueFlag)) // < @@ -272,7 +289,6 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { require.True(t, handler.IsFlagEnabled(common.SendAlwaysFlag)) require.True(t, handler.IsFlagEnabled(common.ValueLengthCheckFlag)) require.True(t, handler.IsFlagEnabled(common.CheckTransferFlag)) - require.True(t, handler.IsFlagEnabled(common.TransferToMetaFlag)) require.True(t, handler.IsFlagEnabled(common.ESDTNFTImprovementV1Flag)) require.True(t, handler.IsFlagEnabled(common.ChangeDelegationOwnerFlag)) require.True(t, handler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag)) @@ -291,6 +307,7 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { require.True(t, handler.IsFlagEnabled(common.MultiClaimOnDelegationFlag)) require.True(t, handler.IsFlagEnabled(common.ChangeUsernameFlag)) require.True(t, handler.IsFlagEnabled(common.AutoBalanceDataTriesFlag)) + require.True(t, handler.IsFlagEnabled(common.MigrateDataTrieFlag)) require.True(t, handler.IsFlagEnabled(common.FixDelegationChangeOwnerOnAccountFlag)) require.True(t, handler.IsFlagEnabled(common.FixOOGReturnCodeFlag)) require.True(t, 
handler.IsFlagEnabled(common.DeterministicSortOnValidatorsInfoFixFlag)) @@ -298,11 +315,19 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { require.True(t, handler.IsFlagEnabled(common.ScToScLogEventFlag)) require.True(t, handler.IsFlagEnabled(common.BlockGasAndFeesReCheckFlag)) require.True(t, handler.IsFlagEnabled(common.BalanceWaitingListsFlag)) - require.True(t, handler.IsFlagEnabled(common.WaitingListFixFlag)) require.True(t, handler.IsFlagEnabled(common.NFTStopCreateFlag)) require.True(t, handler.IsFlagEnabled(common.FixGasRemainingForSaveKeyValueFlag)) require.True(t, handler.IsFlagEnabled(common.IsChangeOwnerAddressCrossShardThroughSCFlag)) require.True(t, handler.IsFlagEnabled(common.CurrentRandomnessOnSortingFlag)) + require.True(t, handler.IsFlagEnabled(common.StakeLimitsFlag)) + require.False(t, handler.IsFlagEnabled(common.StakingV4Step1Flag)) + require.True(t, handler.IsFlagEnabled(common.StakingV4Step2Flag)) + require.True(t, handler.IsFlagEnabled(common.StakingV4Step3Flag)) + require.True(t, handler.IsFlagEnabled(common.StakingV4StartedFlag)) + require.True(t, handler.IsFlagEnabled(common.AlwaysMergeContextsInEEIFlag)) + require.True(t, handler.IsFlagEnabled(common.DynamicESDTFlag)) + require.True(t, handler.IsFlagEnabled(common.RelayedTransactionsV3Flag)) + require.True(t, handler.IsFlagEnabled(common.FixRelayedBaseCostFlag)) } func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { @@ -344,7 +369,6 @@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.ESDTMultiTransferEnableEpoch, handler.GetActivationEpoch(common.ESDTMultiTransferFlag)) require.Equal(t, cfg.GlobalMintBurnDisableEpoch, handler.GetActivationEpoch(common.GlobalMintBurnFlag)) require.Equal(t, cfg.ESDTTransferRoleEnableEpoch, handler.GetActivationEpoch(common.ESDTTransferRoleFlag)) - require.Equal(t, cfg.BuiltInFunctionOnMetaEnableEpoch, handler.GetActivationEpoch(common.BuiltInFunctionOnMetaFlag)) require.Equal(t, cfg.ComputeRewardCheckpointEnableEpoch, handler.GetActivationEpoch(common.ComputeRewardCheckpointFlag)) require.Equal(t, cfg.SCRSizeInvariantCheckEnableEpoch, handler.GetActivationEpoch(common.SCRSizeInvariantCheckFlag)) require.Equal(t, cfg.BackwardCompSaveKeyValueEnableEpoch, handler.GetActivationEpoch(common.BackwardCompSaveKeyValueFlag)) @@ -385,7 +409,6 @@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.SendAlwaysFlag)) require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.ValueLengthCheckFlag)) require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.CheckTransferFlag)) - require.Equal(t, cfg.BuiltInFunctionOnMetaEnableEpoch, handler.GetActivationEpoch(common.TransferToMetaFlag)) require.Equal(t, cfg.ESDTMultiTransferEnableEpoch, handler.GetActivationEpoch(common.ESDTNFTImprovementV1Flag)) require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.ChangeDelegationOwnerFlag)) require.Equal(t, cfg.RefactorPeersMiniBlocksEnableEpoch, handler.GetActivationEpoch(common.RefactorPeersMiniBlocksFlag)) @@ -404,6 +427,7 @@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.MultiClaimOnDelegationEnableEpoch, handler.GetActivationEpoch(common.MultiClaimOnDelegationFlag)) require.Equal(t, cfg.ChangeUsernameEnableEpoch, handler.GetActivationEpoch(common.ChangeUsernameFlag)) require.Equal(t, 
cfg.AutoBalanceDataTriesEnableEpoch, handler.GetActivationEpoch(common.AutoBalanceDataTriesFlag)) + require.Equal(t, cfg.MigrateDataTrieEnableEpoch, handler.GetActivationEpoch(common.MigrateDataTrieFlag)) require.Equal(t, cfg.FixDelegationChangeOwnerOnAccountEnableEpoch, handler.GetActivationEpoch(common.FixDelegationChangeOwnerOnAccountFlag)) require.Equal(t, cfg.FixOOGReturnCodeEnableEpoch, handler.GetActivationEpoch(common.FixOOGReturnCodeFlag)) require.Equal(t, cfg.DeterministicSortOnValidatorsInfoEnableEpoch, handler.GetActivationEpoch(common.DeterministicSortOnValidatorsInfoFixFlag)) @@ -411,11 +435,23 @@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.ScToScLogEventEnableEpoch, handler.GetActivationEpoch(common.ScToScLogEventFlag)) require.Equal(t, cfg.BlockGasAndFeesReCheckEnableEpoch, handler.GetActivationEpoch(common.BlockGasAndFeesReCheckFlag)) require.Equal(t, cfg.BalanceWaitingListsEnableEpoch, handler.GetActivationEpoch(common.BalanceWaitingListsFlag)) - require.Equal(t, cfg.WaitingListFixEnableEpoch, handler.GetActivationEpoch(common.WaitingListFixFlag)) require.Equal(t, cfg.NFTStopCreateEnableEpoch, handler.GetActivationEpoch(common.NFTStopCreateFlag)) require.Equal(t, cfg.ChangeOwnerAddressCrossShardThroughSCEnableEpoch, handler.GetActivationEpoch(common.IsChangeOwnerAddressCrossShardThroughSCFlag)) require.Equal(t, cfg.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch, handler.GetActivationEpoch(common.FixGasRemainingForSaveKeyValueFlag)) require.Equal(t, cfg.CurrentRandomnessOnSortingEnableEpoch, handler.GetActivationEpoch(common.CurrentRandomnessOnSortingFlag)) + require.Equal(t, cfg.StakeLimitsEnableEpoch, handler.GetActivationEpoch(common.StakeLimitsFlag)) + require.Equal(t, cfg.StakingV4Step1EnableEpoch, handler.GetActivationEpoch(common.StakingV4Step1Flag)) + require.Equal(t, cfg.StakingV4Step2EnableEpoch, handler.GetActivationEpoch(common.StakingV4Step2Flag)) + require.Equal(t, cfg.StakingV4Step3EnableEpoch, handler.GetActivationEpoch(common.StakingV4Step3Flag)) + require.Equal(t, cfg.CleanupAuctionOnLowWaitingListEnableEpoch, handler.GetActivationEpoch(common.CleanupAuctionOnLowWaitingListFlag)) + require.Equal(t, cfg.StakingV4Step1EnableEpoch, handler.GetActivationEpoch(common.StakingV4StartedFlag)) + require.Equal(t, cfg.AlwaysMergeContextsInEEIEnableEpoch, handler.GetActivationEpoch(common.AlwaysMergeContextsInEEIFlag)) + require.Equal(t, cfg.DynamicESDTEnableEpoch, handler.GetActivationEpoch(common.DynamicESDTFlag)) + require.Equal(t, cfg.EGLDInMultiTransferEnableEpoch, handler.GetActivationEpoch(common.EGLDInESDTMultiTransferFlag)) + require.Equal(t, cfg.CryptoOpcodesV2EnableEpoch, handler.GetActivationEpoch(common.CryptoOpcodesV2Flag)) + require.Equal(t, cfg.RelayedTransactionsV3EnableEpoch, handler.GetActivationEpoch(common.RelayedTransactionsV3Flag)) + require.Equal(t, cfg.FixRelayedBaseCostEnableEpoch, handler.GetActivationEpoch(common.FixRelayedBaseCostFlag)) + require.Equal(t, cfg.MultiESDTNFTTransferAndExecuteByUserEnableEpoch, handler.GetActivationEpoch(common.MultiESDTNFTTransferAndExecuteByUserFlag)) } func TestEnableEpochsHandler_IsInterfaceNil(t *testing.T) { diff --git a/common/holders/rootHashHolder.go b/common/holders/rootHashHolder.go index 68f2a295a1b..47be1787feb 100644 --- a/common/holders/rootHashHolder.go +++ b/common/holders/rootHashHolder.go @@ -1,6 +1,7 @@ package holders import ( + "encoding/hex" "fmt" "github.com/multiversx/mx-chain-core-go/core" @@ -19,6 +20,14 @@ func 
NewRootHashHolder(rootHash []byte, epoch core.OptionalUint32) *rootHashHold } } +// NewDefaultRootHashesHolder creates a rootHashHolder without an epoch set +func NewDefaultRootHashesHolder(rootHash []byte) *rootHashHolder { + return &rootHashHolder{ + rootHash: rootHash, + epoch: core.OptionalUint32{}, + } +} + // NewRootHashHolderAsEmpty creates an empty rootHashHolder func NewRootHashHolderAsEmpty() *rootHashHolder { return &rootHashHolder{ @@ -39,7 +48,7 @@ func (holder *rootHashHolder) GetEpoch() core.OptionalUint32 { // String returns rootHashesHolder as a string func (holder *rootHashHolder) String() string { - return fmt.Sprintf("root hash %s, epoch %v, has value %v", holder.rootHash, holder.epoch.Value, holder.epoch.HasValue) + return fmt.Sprintf("root hash %s, epoch %v, has value %v", hex.EncodeToString(holder.rootHash), holder.epoch.Value, holder.epoch.HasValue) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/common/holders/rootHashHolder_test.go b/common/holders/rootHashHolder_test.go index 645e73c0551..07e50675d29 100644 --- a/common/holders/rootHashHolder_test.go +++ b/common/holders/rootHashHolder_test.go @@ -1,6 +1,7 @@ package holders import ( + "encoding/hex" "testing" "github.com/multiversx/mx-chain-core-go/core" @@ -32,7 +33,8 @@ func TestNewRootHashHolder_String(t *testing.T) { HasValue: true, }, ) - expectedString := "root hash rootHash, epoch 5, has value true" + hexRootHash := hex.EncodeToString([]byte("rootHash")) + expectedString := "root hash " + hexRootHash + ", epoch 5, has value true" assert.Equal(t, expectedString, holder.String()) } diff --git a/cmd/assessment/hostParameters/hostInfo.go b/common/hostParameters/hostInfo.go similarity index 100% rename from cmd/assessment/hostParameters/hostInfo.go rename to common/hostParameters/hostInfo.go diff --git a/cmd/assessment/hostParameters/hostInfo_test.go b/common/hostParameters/hostInfo_test.go similarity index 100% rename from cmd/assessment/hostParameters/hostInfo_test.go rename to common/hostParameters/hostInfo_test.go diff --git a/cmd/assessment/hostParameters/hostParametersGetter.go b/common/hostParameters/hostParametersGetter.go similarity index 100% rename from cmd/assessment/hostParameters/hostParametersGetter.go rename to common/hostParameters/hostParametersGetter.go diff --git a/cmd/assessment/hostParameters/hostParametersGetter_test.go b/common/hostParameters/hostParametersGetter_test.go similarity index 100% rename from cmd/assessment/hostParameters/hostParametersGetter_test.go rename to common/hostParameters/hostParametersGetter_test.go diff --git a/common/interface.go b/common/interface.go index d55a92853ff..82b2960d0ce 100644 --- a/common/interface.go +++ b/common/interface.go @@ -42,8 +42,7 @@ type Trie interface { Delete(key []byte) error RootHash() ([]byte, error) Commit() error - Recreate(root []byte) (Trie, error) - RecreateFromEpoch(options RootHashHolder) (Trie, error) + Recreate(options RootHashHolder) (Trie, error) String() string GetObsoleteHashes() [][]byte GetDirtyHashes() (ModifiedHashes, error) @@ -223,17 +222,17 @@ type StateStatisticsHandler interface { Reset() ResetSnapshot() - IncrCache() + IncrementCache() Cache() uint64 - IncrSnapshotCache() + IncrementSnapshotCache() SnapshotCache() uint64 - IncrPersister(epoch uint32) + IncrementPersister(epoch uint32) Persister(epoch uint32) uint64 - IncrSnapshotPersister(epoch uint32) + IncrementSnapshotPersister(epoch uint32) SnapshotPersister(epoch uint32) uint64 - IncrTrie() + 
IncrementTrie() Trie() uint64 ProcessingStats() []string @@ -314,6 +313,7 @@ type ManagedPeersHolder interface { IncrementRoundsWithoutReceivedMessages(pkBytes []byte) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) GetManagedKeysByCurrentNode() map[string]crypto.PrivateKey + GetLoadedKeysByCurrentNode() [][]byte IsKeyManagedByCurrentNode(pkBytes []byte) bool IsKeyRegistered(pkBytes []byte) bool IsPidManagedByCurrentNode(pid core.PeerID) bool @@ -322,6 +322,7 @@ type ManagedPeersHolder interface { GetNextPeerAuthenticationTime(pkBytes []byte) (time.Time, error) SetNextPeerAuthenticationTime(pkBytes []byte, nextTime time.Time) IsMultiKeyMode() bool + GetRedundancyStepInReason() string IsInterfaceNil() bool } @@ -342,6 +343,7 @@ type StateSyncNotifierSubscriber interface { type ManagedPeersMonitor interface { GetManagedKeysCount() int GetManagedKeys() [][]byte + GetLoadedKeys() [][]byte GetEligibleManagedKeys() ([][]byte, error) GetWaitingManagedKeys() ([][]byte, error) IsInterfaceNil() bool diff --git a/common/operationmodes/historicalBalances.go b/common/operationmodes/historicalBalances.go new file mode 100644 index 00000000000..da3cfe98dde --- /dev/null +++ b/common/operationmodes/historicalBalances.go @@ -0,0 +1,41 @@ +package operationmodes + +import ( + "github.com/multiversx/mx-chain-go/config" + logger "github.com/multiversx/mx-chain-logger-go" +) + +// ProcessHistoricalBalancesMode will process the provided flags for the historical balances +func ProcessHistoricalBalancesMode(log logger.Logger, configs *config.Configs) { + configs.GeneralConfig.StoragePruning.Enabled = true + configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData = false + configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData = false + configs.GeneralConfig.GeneralSettings.StartInEpochEnabled = false + configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData = false + configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled = false + configs.GeneralConfig.DbLookupExtensions.Enabled = true + configs.PreferencesConfig.Preferences.FullArchive = true + + log.Warn("the node is in historical balances mode! 
Will auto-set some config values", + "StoragePruning.Enabled", configs.GeneralConfig.StoragePruning.Enabled, + "StoragePruning.ValidatorCleanOldEpochsData", configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData, + "StoragePruning.ObserverCleanOldEpochsData", configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData, + "StoragePruning.AccountsTrieCleanOldEpochsData", configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData, + "GeneralSettings.StartInEpochEnabled", configs.GeneralConfig.GeneralSettings.StartInEpochEnabled, + "StateTriesConfig.AccountsStatePruningEnabled", configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled, + "DbLookupExtensions.Enabled", configs.GeneralConfig.DbLookupExtensions.Enabled, + "Preferences.FullArchive", configs.PreferencesConfig.Preferences.FullArchive, + ) +} + +// IsInHistoricalBalancesMode returns true if the configuration provided denotes a historical balances mode +func IsInHistoricalBalancesMode(configs *config.Configs) bool { + return configs.GeneralConfig.StoragePruning.Enabled && + !configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData && + !configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData && + !configs.GeneralConfig.GeneralSettings.StartInEpochEnabled && + !configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData && + !configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled && + configs.GeneralConfig.DbLookupExtensions.Enabled && + configs.PreferencesConfig.Preferences.FullArchive +} diff --git a/common/operationmodes/historicalBalances_test.go b/common/operationmodes/historicalBalances_test.go new file mode 100644 index 00000000000..d06061c3027 --- /dev/null +++ b/common/operationmodes/historicalBalances_test.go @@ -0,0 +1,141 @@ +package operationmodes + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/assert" +) + +func TestProcessHistoricalBalancesMode(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + + assert.True(t, cfg.GeneralConfig.StoragePruning.Enabled) + assert.False(t, cfg.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData) + assert.False(t, cfg.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData) + assert.False(t, cfg.GeneralConfig.GeneralSettings.StartInEpochEnabled) + assert.False(t, cfg.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData) + assert.False(t, cfg.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled) + assert.True(t, cfg.GeneralConfig.DbLookupExtensions.Enabled) + assert.True(t, cfg.PreferencesConfig.Preferences.FullArchive) +} + +func TestIsInHistoricalBalancesMode(t *testing.T) { + t.Parallel() + + t.Run("empty configs should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("storage pruning disabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StoragePruning.Enabled = false + assert.False(t, IsInHistoricalBalancesMode(cfg)) + 
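// Illustrative sketch, not part of the diff: the intended call order for the two helpers added in
// common/operationmodes/historicalBalances.go. ProcessHistoricalBalancesMode forces the pruning,
// start-in-epoch, db-lookup-extensions and full-archive settings listed in its log.Warn call, and
// IsInHistoricalBalancesMode reports true for exactly that combination. Import paths follow the
// repository layout shown in the diff; wiring this into real node start-up is assumed, not shown.
package main

import (
	"fmt"

	logger "github.com/multiversx/mx-chain-logger-go"

	"github.com/multiversx/mx-chain-go/common/operationmodes"
	"github.com/multiversx/mx-chain-go/config"
)

func main() {
	cfgs := &config.Configs{
		GeneralConfig:     &config.Config{},
		PreferencesConfig: &config.Preferences{},
	}

	// A default configuration is not in historical balances mode.
	fmt.Println(operationmodes.IsInHistoricalBalancesMode(cfgs)) // false

	// Overrides the relevant config values and logs a warning listing each of them.
	operationmodes.ProcessHistoricalBalancesMode(logger.GetOrCreate("example"), cfgs)

	// The exact combination set above is what IsInHistoricalBalancesMode checks for.
	fmt.Println(operationmodes.IsInHistoricalBalancesMode(cfgs)) // true
}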
}) + t.Run("validator clean old epoch data enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("observer clean old epoch data enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("start in epoch enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.GeneralSettings.StartInEpochEnabled = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("accounts trie clean old epoch data enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("accounts state pruning enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("db lookup extension disabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.DbLookupExtensions.Enabled = false + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("not a full archive node should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.PreferencesConfig.Preferences.FullArchive = false + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("with historical balances config should return true", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + assert.True(t, IsInHistoricalBalancesMode(cfg)) + }) + +} diff --git a/common/operationmodes/operationmodes.go b/common/operationmodes/operationmodes.go index 70aed256f4b..1ae6a6fad70 100644 --- a/common/operationmodes/operationmodes.go +++ b/common/operationmodes/operationmodes.go @@ -5,6 +5,7 @@ import ( "strings" ) +// constants that define the operation mode of the node const ( OperationModeFullArchive = "full-archive" OperationModeDbLookupExtension = "db-lookup-extension" diff --git 
a/common/reflectcommon/export_test.go b/common/reflectcommon/export_test.go new file mode 100644 index 00000000000..473dc1b6fc7 --- /dev/null +++ b/common/reflectcommon/export_test.go @@ -0,0 +1,18 @@ +package reflectcommon + +import "reflect" + +// FitsWithinSignedIntegerRange - +func FitsWithinSignedIntegerRange(value reflect.Value, targetType reflect.Type) bool { + return fitsWithinSignedIntegerRange(value, targetType) +} + +// FitsWithinUnsignedIntegerRange - +func FitsWithinUnsignedIntegerRange(value reflect.Value, targetType reflect.Type) bool { + return fitsWithinUnsignedIntegerRange(value, targetType) +} + +// FitsWithinFloatRange - +func FitsWithinFloatRange(value reflect.Value, targetType reflect.Type) bool { + return fitsWithinFloatRange(value, targetType) +} diff --git a/common/reflectcommon/structFieldsUpdate.go b/common/reflectcommon/structFieldsUpdate.go index 6f07d68e7a6..be8671eff4f 100644 --- a/common/reflectcommon/structFieldsUpdate.go +++ b/common/reflectcommon/structFieldsUpdate.go @@ -2,8 +2,8 @@ package reflectcommon import ( "fmt" + "math" "reflect" - "strconv" "strings" "github.com/multiversx/mx-chain-core-go/core/check" @@ -33,7 +33,7 @@ func getReflectValue(original reflect.Value, fieldName string) (value reflect.Va // the structure must be of type pointer, otherwise an error will be returned. All the fields or inner structures MUST be exported // the path must be in the form of InnerStruct.InnerStruct2.Field // newValue must have the same type as the old value, otherwise an error will be returned. Currently, this function does not support slices or maps -func AdaptStructureValueBasedOnPath(structure interface{}, path string, newValue string) (err error) { +func AdaptStructureValueBasedOnPath(structure interface{}, path string, newValue interface{}) (err error) { defer func() { r := recover() if r != nil { @@ -72,76 +72,338 @@ func AdaptStructureValueBasedOnPath(structure interface{}, path string, newValue return trySetTheNewValue(&value, newValue) } -func trySetTheNewValue(value *reflect.Value, newValue string) error { +func trySetTheNewValue(value *reflect.Value, newValue interface{}) error { valueKind := value.Kind() errFunc := func() error { - return fmt.Errorf("cannot cast field <%s> to kind <%s>", newValue, valueKind) + return fmt.Errorf("unable to cast value '%v' of type <%s> to type <%s>", newValue, reflect.TypeOf(newValue), valueKind) } switch valueKind { case reflect.Invalid: return errFunc() case reflect.Bool: - boolVal, err := strconv.ParseBool(newValue) - if err != nil { - return fmt.Errorf("%w: %s", errFunc(), err.Error()) + boolVal, ok := newValue.(bool) + if !ok { + return errFunc() } - value.Set(reflect.ValueOf(boolVal)) - case reflect.Int: - intVal, err := strconv.ParseInt(newValue, 10, 64) - if err != nil { - return fmt.Errorf("%w: %s", errFunc(), err.Error()) + value.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + intVal, ok := convertToSignedInteger(value, newValue) + if !ok { + return errFunc() } - value.Set(reflect.ValueOf(int(intVal))) - case reflect.Int32: - int32Val, err := strconv.ParseInt(newValue, 10, 32) - if err != nil { - return fmt.Errorf("%w: %s", errFunc(), err.Error()) + value.Set(*intVal) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + uintVal, ok := convertToUnsignedInteger(value, newValue) + if !ok { + return errFunc() } - value.Set(reflect.ValueOf(int32(int32Val))) - case reflect.Int64: - int64Val, err := 
strconv.ParseInt(newValue, 10, 64) - if err != nil { - return fmt.Errorf("%w: %s", errFunc(), err.Error()) + value.Set(*uintVal) + case reflect.Float32, reflect.Float64: + floatVal, ok := convertToFloat(value, newValue) + if !ok { + return errFunc() } - value.Set(reflect.ValueOf(int64Val)) - case reflect.Uint32: - uint32Val, err := strconv.ParseUint(newValue, 10, 32) - if err != nil { - return fmt.Errorf("%w: %s", errFunc(), err.Error()) + value.Set(*floatVal) + case reflect.String: + strVal, ok := newValue.(string) + if !ok { + return errFunc() } - value.Set(reflect.ValueOf(uint32(uint32Val))) - case reflect.Uint64: - uint64Val, err := strconv.ParseUint(newValue, 10, 64) + value.SetString(strVal) + case reflect.Slice: + return trySetSliceValue(value, newValue) + case reflect.Struct: + structVal := reflect.ValueOf(newValue) + + return trySetStructValue(value, structVal) + case reflect.Map: + mapValue := reflect.ValueOf(newValue) + + return tryUpdateMapValue(value, mapValue) + default: + return fmt.Errorf("unsupported type <%s> when trying to set the value '%v' of type <%s>", valueKind, newValue, reflect.TypeOf(newValue)) + } + return nil +} + +func trySetSliceValue(value *reflect.Value, newValue interface{}) error { + sliceVal := reflect.ValueOf(newValue) + newSlice := reflect.MakeSlice(value.Type(), sliceVal.Len(), sliceVal.Len()) + + for i := 0; i < sliceVal.Len(); i++ { + item := sliceVal.Index(i) + newItem := reflect.New(value.Type().Elem()).Elem() + + err := trySetStructValue(&newItem, item) if err != nil { - return fmt.Errorf("%w: %s", errFunc(), err.Error()) + return err } - value.Set(reflect.ValueOf(uint64Val)) - case reflect.Float32: - float32Val, err := strconv.ParseFloat(newValue, 32) - if err != nil { - return fmt.Errorf("%w: %s", errFunc(), err.Error()) + newSlice.Index(i).Set(newItem) + } + + value.Set(newSlice) + + return nil +} + +func trySetStructValue(value *reflect.Value, newValue reflect.Value) error { + switch newValue.Kind() { + case reflect.Invalid: + return fmt.Errorf("invalid new value kind") + case reflect.Map: // overwrite with value read from toml file + return updateStructFromMap(value, newValue) + case reflect.Struct: // overwrite with go struct + return updateStructFromStruct(value, newValue) + default: + return fmt.Errorf("unsupported type <%s> when trying to set the value of type <%s>", newValue.Kind(), value.Kind()) + } +} + +func tryUpdateMapValue(value *reflect.Value, newValue reflect.Value) error { + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + switch newValue.Kind() { + case reflect.Map: + for _, key := range newValue.MapKeys() { + item := newValue.MapIndex(key) + newItem := reflect.New(value.Type().Elem()).Elem() + + err := trySetTheNewValue(&newItem, item.Interface()) + if err != nil { + return err + } + + value.SetMapIndex(key, newItem) } + default: + return fmt.Errorf("unsupported type <%s> when trying to add value in type <%s>", newValue.Kind(), value.Kind()) + } - value.Set(reflect.ValueOf(float32(float32Val))) - case reflect.Float64: - float64Val, err := strconv.ParseFloat(newValue, 32) - if err != nil { - return fmt.Errorf("%w: %s", errFunc(), err.Error()) + return nil +} + +func updateStructFromMap(value *reflect.Value, newValue reflect.Value) error { + for _, key := range newValue.MapKeys() { + fieldName := key.String() + field := value.FieldByName(fieldName) + + if field.IsValid() && field.CanSet() { + err := trySetTheNewValue(&field, newValue.MapIndex(key).Interface()) + if err != nil { + return err + } + } else { + 
return fmt.Errorf("field <%s> not found or cannot be set", fieldName) } + } - value.Set(reflect.ValueOf(float64Val)) - case reflect.String: - value.Set(reflect.ValueOf(newValue)) - default: - return fmt.Errorf("unsupported type <%s> when trying to set the value <%s>", valueKind, newValue) + return nil +} + +func updateStructFromStruct(value *reflect.Value, newValue reflect.Value) error { + for i := 0; i < newValue.NumField(); i++ { + fieldName := newValue.Type().Field(i).Name + field := value.FieldByName(fieldName) + + if field.IsValid() && field.CanSet() { + err := trySetTheNewValue(&field, newValue.Field(i).Interface()) + if err != nil { + return err + } + } else { + return fmt.Errorf("field <%s> not found or cannot be set", fieldName) + } } + return nil } + +func convertToSignedInteger(value *reflect.Value, newValue interface{}) (*reflect.Value, bool) { + var reflectVal = reflect.ValueOf(newValue) + + if !isIntegerType(reflectVal.Type()) { + return nil, false + } + + if !fitsWithinSignedIntegerRange(reflectVal, value.Type()) { + return nil, false + } + + convertedVal := reflectVal.Convert(value.Type()) + return &convertedVal, true +} + +func convertToUnsignedInteger(value *reflect.Value, newValue interface{}) (*reflect.Value, bool) { + var reflectVal = reflect.ValueOf(newValue) + + if !isIntegerType(reflectVal.Type()) { + return nil, false + } + + if !fitsWithinUnsignedIntegerRange(reflectVal, value.Type()) { + return nil, false + } + + convertedVal := reflectVal.Convert(value.Type()) + return &convertedVal, true +} + +func isIntegerType(value reflect.Type) bool { + switch value.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + default: + return false + } +} + +func fitsWithinSignedIntegerRange(value reflect.Value, targetType reflect.Type) bool { + min, errMin := getMinInt(targetType) + max, errMax := getMaxInt(targetType) + if errMin != nil || errMax != nil { + return false + } + + switch value.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return value.Int() >= min && value.Int() <= max + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return value.Uint() <= uint64(max) + } + + return false +} + +func fitsWithinUnsignedIntegerRange(value reflect.Value, targetType reflect.Type) bool { + max, err := getMaxUint(targetType) + if err != nil { + return false + } + + switch value.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return value.Int() >= 0 && uint64(value.Int()) <= max + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return value.Uint() <= max + } + + return false +} + +func convertToFloat(value *reflect.Value, newValue interface{}) (*reflect.Value, bool) { + var reflectVal = reflect.ValueOf(newValue) + + if !isFloatType(reflectVal.Type()) { + return nil, false + } + + if !fitsWithinFloatRange(reflectVal, value.Type()) { + return nil, false + } + + convertedVal := reflectVal.Convert(value.Type()) + return &convertedVal, true +} + +func isFloatType(value reflect.Type) bool { + switch value.Kind() { + case reflect.Float32, reflect.Float64: + return true + default: + return false + } +} + +func fitsWithinFloatRange(value reflect.Value, targetType reflect.Type) bool { + min, errMin := getMinFloat(targetType) + max, errMax := getMaxFloat(targetType) + if errMin != nil || errMax != nil { + return 
false
+	}
+
+	return value.Float() >= min && value.Float() <= max
+}
+
+func getMinInt(targetType reflect.Type) (int64, error) {
+	switch targetType.Kind() {
+	case reflect.Int:
+		return math.MinInt, nil
+	case reflect.Int64:
+		return math.MinInt64, nil
+	case reflect.Int8:
+		return math.MinInt8, nil
+	case reflect.Int16:
+		return math.MinInt16, nil
+	case reflect.Int32:
+		return math.MinInt32, nil
+	default:
+		return 0, fmt.Errorf("target type is not integer")
+	}
+}
+
+func getMaxInt(targetType reflect.Type) (int64, error) {
+	switch targetType.Kind() {
+	case reflect.Int:
+		return math.MaxInt, nil
+	case reflect.Int64:
+		return math.MaxInt64, nil
+	case reflect.Int8:
+		return math.MaxInt8, nil
+	case reflect.Int16:
+		return math.MaxInt16, nil
+	case reflect.Int32:
+		return math.MaxInt32, nil
+	default:
+		return 0, fmt.Errorf("target type is not integer")
+	}
+}
+
+func getMaxUint(targetType reflect.Type) (uint64, error) {
+	switch targetType.Kind() {
+	case reflect.Uint:
+		return math.MaxUint, nil
+	case reflect.Uint64:
+		return math.MaxUint64, nil
+	case reflect.Uint8:
+		return math.MaxUint8, nil
+	case reflect.Uint16:
+		return math.MaxUint16, nil
+	case reflect.Uint32:
+		return math.MaxUint32, nil
+	default:
+		return 0, fmt.Errorf("target type is not unsigned integer")
+	}
+}
+
+func getMinFloat(targetType reflect.Type) (float64, error) {
+	switch targetType.Kind() {
+	case reflect.Float32:
+		return -math.MaxFloat32, nil
+	case reflect.Float64:
+		return -math.MaxFloat64, nil
+	default:
+		return 0, fmt.Errorf("target type is not float")
+	}
+}
+
+func getMaxFloat(targetType reflect.Type) (float64, error) {
+	switch targetType.Kind() {
+	case reflect.Float32:
+		return math.MaxFloat32, nil
+	case reflect.Float64:
+		return math.MaxFloat64, nil
+	default:
+		return 0, fmt.Errorf("target type is not float")
+	}
+}
diff --git a/common/reflectcommon/structFieldsUpdate_test.go b/common/reflectcommon/structFieldsUpdate_test.go
index bc7083e885e..44d3ae7d694 100644
--- a/common/reflectcommon/structFieldsUpdate_test.go
+++ b/common/reflectcommon/structFieldsUpdate_test.go
@@ -2,9 +2,13 @@ package reflectcommon

 import (
 	"fmt"
+	"reflect"
 	"testing"

 	"github.com/multiversx/mx-chain-go/config"
+	"github.com/multiversx/mx-chain-go/testscommon/toml"
+
+	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/stretchr/testify/require"
 )

@@ -68,7 +72,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) {
 		require.Equal(t, "invalid structure name: FilePath2", err.Error())
 	})

-	t.Run("should error when setting on unsupported type", func(t *testing.T) {
+	t.Run("should error when setting unsupported type on struct", func(t *testing.T) {
 		t.Parallel()

 		path := "TrieSyncStorage.DB"
@@ -77,7 +81,18 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) {

 		err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue)

-		require.ErrorContains(t, err, "unsupported type when trying to set the value ")
+		require.Equal(t, "unsupported type when trying to set the value of type ", err.Error())
+	})
+
+	t.Run("should error when setting invalid type on struct", func(t *testing.T) {
+		t.Parallel()
+
+		path := "TrieSyncStorage.DB"
+		cfg := &config.Config{}
+
+		err := AdaptStructureValueBasedOnPath(cfg, path, nil)
+
+		require.Equal(t, "invalid new value kind", err.Error())
 	})

 	t.Run("should error when setting invalid uint32", func(t *testing.T) {
@@ -90,7 +105,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) {

 		err := 
AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) - require.ErrorContains(t, err, "cannot cast field to kind ") + require.Equal(t, "unable to cast value 'invalid uint32' of type to type ", err.Error()) }) t.Run("should error when setting invalid uint64", func(t *testing.T) { @@ -103,7 +118,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) - require.ErrorContains(t, err, "cannot cast field to kind ") + require.Equal(t, "unable to cast value 'invalid uint64' of type to type ", err.Error()) }) t.Run("should error when setting invalid float32", func(t *testing.T) { @@ -116,7 +131,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) - require.ErrorContains(t, err, "cannot cast field to kind ") + require.Equal(t, "unable to cast value 'invalid float32' of type to type ", err.Error()) }) t.Run("should error when setting invalid float64", func(t *testing.T) { @@ -129,7 +144,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) - require.ErrorContains(t, err, "cannot cast field to kind ") + require.Equal(t, "unable to cast value 'invalid float64' of type to type ", err.Error()) }) t.Run("should error when setting invalid int64", func(t *testing.T) { @@ -142,20 +157,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) - require.ErrorContains(t, err, "cannot cast field to kind ") - }) - - t.Run("should error when setting invalid int64", func(t *testing.T) { - t.Parallel() - - path := "HeartbeatV2.HeartbeatExpiryTimespanInSec" - expectedNewValue := "invalid int64" - cfg := &config.Config{} - cfg.HeartbeatV2.HeartbeatExpiryTimespanInSec = 37 - - err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) - - require.ErrorContains(t, err, "cannot cast field to kind ") + require.Equal(t, "unable to cast value 'invalid int64' of type to type ", err.Error()) }) t.Run("should error when setting invalid int", func(t *testing.T) { @@ -168,7 +170,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) - require.ErrorContains(t, err, "cannot cast field to kind ") + require.Equal(t, "unable to cast value 'invalid int' of type to type ", err.Error()) }) t.Run("should error when setting invalid bool", func(t *testing.T) { @@ -181,7 +183,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) - require.ErrorContains(t, err, "cannot cast field to kind ") + require.Equal(t, "unable to cast value 'invalid bool' of type to type ", err.Error()) }) t.Run("should error if the field is un-settable / unexported", func(t *testing.T) { @@ -279,7 +281,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { cfg := &config.Config{} cfg.StoragePruning.FullArchiveNumActivePersisters = 37 - err := AdaptStructureValueBasedOnPath(cfg, path, fmt.Sprintf("%d", expectedNewValue)) + err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) require.NoError(t, err) require.Equal(t, expectedNewValue, cfg.StoragePruning.FullArchiveNumActivePersisters) @@ -293,7 +295,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { cfg := &config.Config{} cfg.HeartbeatV2.MinPeersThreshold = 37.0 - err := AdaptStructureValueBasedOnPath(cfg, path, fmt.Sprintf("%f", 
expectedNewValue)) + err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) require.NoError(t, err) require.Equal(t, expectedNewValue, cfg.HeartbeatV2.MinPeersThreshold) @@ -307,7 +309,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { cfg := &config.Config{} cfg.HeartbeatV2.PeerAuthenticationTimeThresholdBetweenSends = 37.0 - err := AdaptStructureValueBasedOnPath(cfg, path, fmt.Sprintf("%f", expectedNewValue)) + err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) require.NoError(t, err) require.Equal(t, expectedNewValue, cfg.HeartbeatV2.PeerAuthenticationTimeThresholdBetweenSends) @@ -321,7 +323,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { cfg := &config.Config{} cfg.Debug.InterceptorResolver.DebugLineExpiration = 37 - err := AdaptStructureValueBasedOnPath(cfg, path, fmt.Sprintf("%d", expectedNewValue)) + err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) require.NoError(t, err) require.Equal(t, expectedNewValue, cfg.Debug.InterceptorResolver.DebugLineExpiration) @@ -335,7 +337,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { cfg := &config.Config{} cfg.Hardfork.GenesisTime = 37 - err := AdaptStructureValueBasedOnPath(cfg, path, fmt.Sprintf("%d", expectedNewValue)) + err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) require.NoError(t, err) require.Equal(t, expectedNewValue, cfg.Hardfork.GenesisTime) @@ -349,7 +351,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { cfg := &config.Config{} cfg.TrieSyncStorage.SizeInBytes = 37 - err := AdaptStructureValueBasedOnPath(cfg, path, fmt.Sprintf("%d", expectedNewValue)) + err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) require.NoError(t, err) require.Equal(t, expectedNewValue, cfg.TrieSyncStorage.SizeInBytes) @@ -362,7 +364,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { cfg := &config.Config{} cfg.StoragePruning.AccountsTrieCleanOldEpochsData = false - err := AdaptStructureValueBasedOnPath(cfg, path, fmt.Sprintf("%v", true)) + err := AdaptStructureValueBasedOnPath(cfg, path, true) require.NoError(t, err) require.True(t, cfg.StoragePruning.AccountsTrieCleanOldEpochsData) @@ -376,7 +378,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { cfg.StoragePruning.FullArchiveNumActivePersisters = uint32(50) expectedNewValue := uint32(37) - err := AdaptStructureValueBasedOnPath(cfg, path, fmt.Sprintf("%d", expectedNewValue)) + err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) require.NoError(t, err) require.Equal(t, expectedNewValue, cfg.StoragePruning.FullArchiveNumActivePersisters) @@ -390,7 +392,7 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { cfg.Antiflood.NumConcurrentResolverJobs = int32(50) expectedNewValue := int32(37) - err := AdaptStructureValueBasedOnPath(cfg, path, fmt.Sprintf("%d", expectedNewValue)) + err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) require.NoError(t, err) require.Equal(t, expectedNewValue, cfg.Antiflood.NumConcurrentResolverJobs) @@ -418,11 +420,810 @@ func TestAdaptStructureValueBasedOnPath(t *testing.T) { cfg.Hardfork.ExportKeysStorageConfig.DB.MaxBatchSize = 10 expectedNewValue := 37 - err := AdaptStructureValueBasedOnPath(cfg, path, fmt.Sprintf("%d", expectedNewValue)) + err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) require.NoError(t, err) require.Equal(t, expectedNewValue, cfg.Hardfork.ExportKeysStorageConfig.DB.MaxBatchSize) }) + + t.Run("should error if setting int into string", func(t *testing.T) { + 
t.Parallel() + + path := "GeneralSettings.ChainID" + cfg := &config.Config{} + cfg.GeneralSettings.ChainID = "D" + expectedNewValue := 1 + + err := AdaptStructureValueBasedOnPath(cfg, path, expectedNewValue) + require.Equal(t, "unable to cast value '1' of type to type ", err.Error()) + }) + + t.Run("should error for unsupported type", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + expectedNewValue := make(map[string]int) + expectedNewValue["first"] = 1 + expectedNewValue["second"] = 2 + + path := "TestInterface.Value" + + err = AdaptStructureValueBasedOnPath(testConfig, path, expectedNewValue) + require.Equal(t, "unsupported type when trying to set the value 'map[first:1 second:2]' of type ", err.Error()) + }) + + t.Run("should error fit signed for target type not int", func(t *testing.T) { + t.Parallel() + + expectedNewValue := 10 + reflectNewValue := reflect.ValueOf(expectedNewValue) + targetType := reflect.TypeOf("string") + + res := FitsWithinSignedIntegerRange(reflectNewValue, targetType) + require.False(t, res) + }) + + t.Run("should error fit signed for value not int and target type int", func(t *testing.T) { + t.Parallel() + + expectedNewValue := "value" + reflectNewValue := reflect.ValueOf(expectedNewValue) + targetType := reflect.TypeOf(10) + + res := FitsWithinSignedIntegerRange(reflectNewValue, targetType) + require.False(t, res) + }) + + t.Run("should error fit unsigned for target type not uint", func(t *testing.T) { + t.Parallel() + + expectedNewValue := uint(10) + reflectNewValue := reflect.ValueOf(expectedNewValue) + targetType := reflect.TypeOf("string") + + res := FitsWithinUnsignedIntegerRange(reflectNewValue, targetType) + require.False(t, res) + }) + + t.Run("should error fit unsigned for value not uint and target type uint", func(t *testing.T) { + t.Parallel() + + expectedNewValue := "value" + reflectNewValue := reflect.ValueOf(expectedNewValue) + targetType := reflect.TypeOf(uint(10)) + + res := FitsWithinUnsignedIntegerRange(reflectNewValue, targetType) + require.False(t, res) + }) + + t.Run("should error fit float for target type not float", func(t *testing.T) { + t.Parallel() + + expectedNewValue := float32(10) + reflectNewValue := reflect.ValueOf(expectedNewValue) + targetType := reflect.TypeOf("string") + + res := FitsWithinFloatRange(reflectNewValue, targetType) + require.False(t, res) + }) + + t.Run("should work and override int8 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[0].Path, overrideConfig.OverridableConfigTomlValues[0].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[0].Value, int64(testConfig.Int8.Number)) + }) + + t.Run("should error int8 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[1].Path, overrideConfig.OverridableConfigTomlValues[1].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast 
value '128' of type to type ", err.Error()) + }) + + t.Run("should work and override int8 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[2].Path, overrideConfig.OverridableConfigTomlValues[2].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[2].Value, int64(testConfig.Int8.Number)) + }) + + t.Run("should error int8 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[3].Path, overrideConfig.OverridableConfigTomlValues[3].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '-129' of type to type ", err.Error()) + }) + + t.Run("should work and override int16 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[4].Path, overrideConfig.OverridableConfigTomlValues[4].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[4].Value, int64(testConfig.Int16.Number)) + }) + + t.Run("should error int16 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[5].Path, overrideConfig.OverridableConfigTomlValues[5].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '32768' of type to type ", err.Error()) + }) + + t.Run("should work and override int16 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[6].Path, overrideConfig.OverridableConfigTomlValues[6].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[6].Value, int64(testConfig.Int16.Number)) + }) + + t.Run("should error int16 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[7].Path, overrideConfig.OverridableConfigTomlValues[7].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '-32769' of type to type ", err.Error()) + }) + + 
t.Run("should work and override int32 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[8].Path, overrideConfig.OverridableConfigTomlValues[8].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[8].Value, int64(testConfig.Int32.Number)) + }) + + t.Run("should work and override int32 value with uint16", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + expectedNewValue := uint16(10) + + path := "TestConfigI32.Int32.Number" + + err = AdaptStructureValueBasedOnPath(testConfig, path, expectedNewValue) + require.NoError(t, err) + require.Equal(t, int32(expectedNewValue), testConfig.Int32.Number) + }) + + t.Run("should error int32 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[9].Path, overrideConfig.OverridableConfigTomlValues[9].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '2147483648' of type to type ", err.Error()) + }) + + t.Run("should work and override int32 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[10].Path, overrideConfig.OverridableConfigTomlValues[10].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[10].Value, int64(testConfig.Int32.Number)) + }) + + t.Run("should error int32 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[11].Path, overrideConfig.OverridableConfigTomlValues[11].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '-2147483649' of type to type ", err.Error()) + }) + + t.Run("should work and override int64 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[12].Path, overrideConfig.OverridableConfigTomlValues[12].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[12].Value, int64(testConfig.Int64.Number)) + }) + + t.Run("should work and override int64 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") 
+ require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[13].Path, overrideConfig.OverridableConfigTomlValues[13].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[13].Value, int64(testConfig.Int64.Number)) + }) + + t.Run("should work and override uint8 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[14].Path, overrideConfig.OverridableConfigTomlValues[14].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[14].Value, int64(testConfig.Uint8.Number)) + }) + + t.Run("should error uint8 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[15].Path, overrideConfig.OverridableConfigTomlValues[15].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '256' of type to type ", err.Error()) + }) + + t.Run("should error uint8 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[16].Path, overrideConfig.OverridableConfigTomlValues[16].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '-256' of type to type ", err.Error()) + }) + + t.Run("should work and override uint16 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[17].Path, overrideConfig.OverridableConfigTomlValues[17].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[17].Value, int64(testConfig.Uint16.Number)) + }) + + t.Run("should error uint16 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[18].Path, overrideConfig.OverridableConfigTomlValues[18].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '65536' of type to type ", err.Error()) + }) + + t.Run("should error uint16 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := 
loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[19].Path, overrideConfig.OverridableConfigTomlValues[19].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '-65536' of type to type ", err.Error()) + }) + + t.Run("should work and override uint32 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[20].Path, overrideConfig.OverridableConfigTomlValues[20].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[20].Value, int64(testConfig.Uint32.Number)) + }) + + t.Run("should error uint32 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[21].Path, overrideConfig.OverridableConfigTomlValues[21].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '4294967296' of type to type ", err.Error()) + }) + + t.Run("should error uint32 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[22].Path, overrideConfig.OverridableConfigTomlValues[22].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '-4294967296' of type to type ", err.Error()) + }) + + t.Run("should work and override uint64 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[23].Path, overrideConfig.OverridableConfigTomlValues[23].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[23].Value, int64(testConfig.Uint64.Number)) + }) + + t.Run("should error uint64 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[24].Path, overrideConfig.OverridableConfigTomlValues[24].Value) + require.Equal(t, "unable to cast value '-9223372036854775808' of type to type ", err.Error()) + }) + + t.Run("should work and override float32 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + 
require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[25].Path, overrideConfig.OverridableConfigTomlValues[25].Value) + require.NoError(t, err) + require.Equal(t, float32(3.4), testConfig.Float32.Number) + }) + + t.Run("should error float32 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[26].Path, overrideConfig.OverridableConfigTomlValues[26].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '3.4e+39' of type to type ", err.Error()) + }) + + t.Run("should work and override float32 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[27].Path, overrideConfig.OverridableConfigTomlValues[27].Value) + require.NoError(t, err) + require.Equal(t, float32(-3.4), testConfig.Float32.Number) + }) + + t.Run("should error float32 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[28].Path, overrideConfig.OverridableConfigTomlValues[28].Value) + require.NotNil(t, err) + require.Equal(t, "unable to cast value '-3.4e+40' of type to type ", err.Error()) + }) + + t.Run("should work and override float64 value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[29].Path, overrideConfig.OverridableConfigTomlValues[29].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[29].Value, testConfig.Float64.Number) + }) + + t.Run("should work and override float64 negative value", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[30].Path, overrideConfig.OverridableConfigTomlValues[30].Value) + require.NoError(t, err) + require.Equal(t, overrideConfig.OverridableConfigTomlValues[30].Value, testConfig.Float64.Number) + }) + + t.Run("should work and override struct", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + expectedNewValue := toml.Description{ + Number: 11, + } + + err = 
AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[31].Path, overrideConfig.OverridableConfigTomlValues[31].Value) + require.NoError(t, err) + require.Equal(t, expectedNewValue, testConfig.TestConfigStruct.ConfigStruct.Description) + }) + + t.Run("should error with field not found", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[32].Path, overrideConfig.OverridableConfigTomlValues[32].Value) + require.Equal(t, "field not found or cannot be set", err.Error()) + }) + + t.Run("should error with different types", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[33].Path, overrideConfig.OverridableConfigTomlValues[33].Value) + require.Equal(t, "unable to cast value '11' of type to type ", err.Error()) + }) + + t.Run("should work and override nested struct", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + expectedNewValue := toml.ConfigNestedStruct{ + Text: "Overwritten text", + Message: toml.Message{ + Public: false, + MessageDescription: []toml.MessageDescription{ + {Text: "Overwritten Text1"}, + }, + }, + } + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[34].Path, overrideConfig.OverridableConfigTomlValues[34].Value) + require.NoError(t, err) + require.Equal(t, expectedNewValue, testConfig.TestConfigNestedStruct.ConfigNestedStruct) + }) + + t.Run("should work on slice and override map", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + expectedNewValue := []toml.MessageDescription{ + {Text: "Overwritten Text1"}, + {Text: "Overwritten Text2"}, + } + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[35].Path, overrideConfig.OverridableConfigTomlValues[35].Value) + require.NoError(t, err) + require.Equal(t, expectedNewValue, testConfig.TestConfigNestedStruct.ConfigNestedStruct.Message.MessageDescription) + }) + + t.Run("should error on slice when override int", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + path := "TestConfigNestedStruct.ConfigNestedStruct.Message.MessageDescription" + + err = AdaptStructureValueBasedOnPath(testConfig, path, 10) + require.Equal(t, "reflect: call of reflect.Value.Len on int Value", err.Error()) + }) + + t.Run("should error on slice when override different type", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + path := 
"TestConfigNestedStruct.ConfigNestedStruct.Message.MessageDescription" + + expectedNewValue := []int{10, 20} + + err = AdaptStructureValueBasedOnPath(testConfig, path, expectedNewValue) + require.Equal(t, "unsupported type when trying to set the value of type ", err.Error()) + }) + + t.Run("should error on slice when override different struct", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + path := "TestConfigNestedStruct.ConfigNestedStruct.Message.MessageDescription" + + var expectedNewValue = []toml.MessageDescriptionOtherName{ + {Value: "10"}, + {Value: "20"}, + } + + err = AdaptStructureValueBasedOnPath(testConfig, path, expectedNewValue) + require.Equal(t, "field not found or cannot be set", err.Error()) + }) + + t.Run("should error on slice when override different struct types", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + path := "TestConfigNestedStruct.ConfigNestedStruct.Message.MessageDescription" + + var expectedNewValue = []toml.MessageDescriptionOtherType{ + {Text: 10}, + {Text: 20}, + } + + err = AdaptStructureValueBasedOnPath(testConfig, path, expectedNewValue) + require.Equal(t, "unable to cast value '10' of type to type ", err.Error()) + }) + + t.Run("should work on slice and override struct", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + path := "TestConfigNestedStruct.ConfigNestedStruct.Message.MessageDescription" + + var expectedNewValue = []toml.MessageDescription{ + {Text: "Text 1"}, + {Text: "Text 2"}, + } + + err = AdaptStructureValueBasedOnPath(testConfig, path, expectedNewValue) + require.NoError(t, err) + require.Equal(t, expectedNewValue, testConfig.TestConfigNestedStruct.ConfigNestedStruct.Message.MessageDescription) + }) + + t.Run("should work on map, override and insert from config", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[36].Path, overrideConfig.OverridableConfigTomlValues[36].Value) + require.NoError(t, err) + require.Equal(t, 2, len(testConfig.TestMap.Map)) + require.Equal(t, 10, testConfig.TestMap.Map["Key1"].Number) + require.Equal(t, 11, testConfig.TestMap.Map["Key2"].Number) + }) + + t.Run("should work on map and insert from config", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + overrideConfig, err := loadOverrideConfig("../../testscommon/toml/overwrite.toml") + require.NoError(t, err) + + err = AdaptStructureValueBasedOnPath(testConfig, overrideConfig.OverridableConfigTomlValues[37].Path, overrideConfig.OverridableConfigTomlValues[37].Value) + require.NoError(t, err) + require.Equal(t, 3, len(testConfig.TestMap.Map)) + require.Equal(t, 999, testConfig.TestMap.Map["Key1"].Number) + require.Equal(t, 2, testConfig.TestMap.Map["Key2"].Number) + require.Equal(t, 3, testConfig.TestMap.Map["Key3"].Number) + }) + + t.Run("should work on map, override and insert values in map", func(t *testing.T) { + t.Parallel() + + testConfig, err := 
loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + expectedNewValue := make(map[string]toml.MapValues) + expectedNewValue["Key1"] = toml.MapValues{Number: 100} + expectedNewValue["Key2"] = toml.MapValues{Number: 200} + + path := "TestMap.Map" + + err = AdaptStructureValueBasedOnPath(testConfig, path, expectedNewValue) + require.NoError(t, err) + require.Equal(t, 2, len(testConfig.TestMap.Map)) + require.Equal(t, 100, testConfig.TestMap.Map["Key1"].Number) + require.Equal(t, 200, testConfig.TestMap.Map["Key2"].Number) + }) + + t.Run("should error on map when override different map", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + expectedNewValue := make(map[string]toml.MessageDescription) + expectedNewValue["Key1"] = toml.MessageDescription{Text: "A"} + expectedNewValue["Key2"] = toml.MessageDescription{Text: "B"} + + path := "TestMap.Map" + + err = AdaptStructureValueBasedOnPath(testConfig, path, expectedNewValue) + require.Equal(t, "field not found or cannot be set", err.Error()) + }) + + t.Run("should error on map when override anything else other than map", func(t *testing.T) { + t.Parallel() + + testConfig, err := loadTestConfig("../../testscommon/toml/config.toml") + require.NoError(t, err) + + expectedNewValue := 1 + + path := "TestMap.Map" + + err = AdaptStructureValueBasedOnPath(testConfig, path, expectedNewValue) + require.Equal(t, "unsupported type when trying to add value in type ", err.Error()) + }) + +} + +func loadTestConfig(filepath string) (*toml.Config, error) { + cfg := &toml.Config{} + err := core.LoadTomlFile(cfg, filepath) + if err != nil { + return nil, err + } + + return cfg, nil +} + +func loadOverrideConfig(filepath string) (*toml.OverrideConfig, error) { + cfg := &toml.OverrideConfig{} + err := core.LoadTomlFile(cfg, filepath) + if err != nil { + return nil, err + } + + return cfg, nil } func BenchmarkAdaptStructureValueBasedOnPath(b *testing.B) { diff --git a/common/statistics/disabled/stateStatistics.go b/common/statistics/disabled/stateStatistics.go index d10d310129a..c3bdf12420d 100644 --- a/common/statistics/disabled/stateStatistics.go +++ b/common/statistics/disabled/stateStatistics.go @@ -19,8 +19,8 @@ func (s *stateStatistics) Reset() { func (s *stateStatistics) ResetSnapshot() { } -// IncrCache does nothing -func (s *stateStatistics) IncrCache() { +// IncrementCache does nothing +func (s *stateStatistics) IncrementCache() { } // Cache returns zero @@ -28,8 +28,8 @@ func (s *stateStatistics) Cache() uint64 { return 0 } -// IncrSnapshotCache does nothing -func (ss *stateStatistics) IncrSnapshotCache() { +// IncrementSnapshotCache does nothing +func (ss *stateStatistics) IncrementSnapshotCache() { } // SnapshotCache returns the number of cached operations @@ -37,8 +37,8 @@ func (ss *stateStatistics) SnapshotCache() uint64 { return 0 } -// IncrPersister does nothing -func (s *stateStatistics) IncrPersister(epoch uint32) { +// IncrementPersister does nothing +func (s *stateStatistics) IncrementPersister(epoch uint32) { } // Persister returns zero @@ -46,8 +46,8 @@ func (s *stateStatistics) Persister(epoch uint32) uint64 { return 0 } -// IncrSnapshotPersister does nothing -func (ss *stateStatistics) IncrSnapshotPersister(epoch uint32) { +// IncrementSnapshotPersister does nothing +func (ss *stateStatistics) IncrementSnapshotPersister(epoch uint32) { } // SnapshotPersister returns the number of persister operations @@ -55,8 +55,8 @@ func 
(ss *stateStatistics) SnapshotPersister(epoch uint32) uint64 { return 0 } -// IncrTrie does nothing -func (s *stateStatistics) IncrTrie() { +// IncrementTrie does nothing +func (s *stateStatistics) IncrementTrie() { } // Trie returns zero diff --git a/common/statistics/disabled/stateStatistics_test.go b/common/statistics/disabled/stateStatistics_test.go index 7d17aa689d1..725ec3ee6a1 100644 --- a/common/statistics/disabled/stateStatistics_test.go +++ b/common/statistics/disabled/stateStatistics_test.go @@ -31,12 +31,12 @@ func TestStateStatistics_MethodsShouldNotPanic(t *testing.T) { stats.ResetSnapshot() stats.ResetAll() - stats.IncrCache() - stats.IncrSnapshotCache() - stats.IncrSnapshotCache() - stats.IncrPersister(1) - stats.IncrSnapshotPersister(1) - stats.IncrTrie() + stats.IncrementCache() + stats.IncrementSnapshotCache() + stats.IncrementSnapshotCache() + stats.IncrementPersister(1) + stats.IncrementSnapshotPersister(1) + stats.IncrementTrie() require.Equal(t, uint64(0), stats.Cache()) require.Equal(t, uint64(0), stats.SnapshotCache()) diff --git a/common/statistics/osLevel/memStats_test.go b/common/statistics/osLevel/memStats_test.go index 99724172e67..ff42ad516c2 100644 --- a/common/statistics/osLevel/memStats_test.go +++ b/common/statistics/osLevel/memStats_test.go @@ -3,12 +3,17 @@ package osLevel import ( + "runtime" "testing" "github.com/stretchr/testify/assert" ) func TestReadCurrentMemStats(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("skipping test on darwin") + } + t.Parallel() memStats, err := ReadCurrentMemStats() diff --git a/common/statistics/stateStatistics.go b/common/statistics/stateStatistics.go index c41040ab933..474dc6d47d1 100644 --- a/common/statistics/stateStatistics.go +++ b/common/statistics/stateStatistics.go @@ -51,8 +51,8 @@ func (ss *stateStatistics) ResetSnapshot() { ss.mutPersisters.Unlock() } -// IncrCache will increment cache counter -func (ss *stateStatistics) IncrCache() { +// IncrementCache will increment cache counter +func (ss *stateStatistics) IncrementCache() { atomic.AddUint64(&ss.numCache, 1) } @@ -61,8 +61,8 @@ func (ss *stateStatistics) Cache() uint64 { return atomic.LoadUint64(&ss.numCache) } -// IncrSnapshotCache will increment snapshot cache counter -func (ss *stateStatistics) IncrSnapshotCache() { +// IncrementSnapshotCache will increment snapshot cache counter +func (ss *stateStatistics) IncrementSnapshotCache() { atomic.AddUint64(&ss.numSnapshotCache, 1) } @@ -71,8 +71,8 @@ func (ss *stateStatistics) SnapshotCache() uint64 { return atomic.LoadUint64(&ss.numSnapshotCache) } -// IncrPersister will increment persister counter -func (ss *stateStatistics) IncrPersister(epoch uint32) { +// IncrementPersister will increment persister counter +func (ss *stateStatistics) IncrementPersister(epoch uint32) { ss.mutPersisters.Lock() defer ss.mutPersisters.Unlock() @@ -87,8 +87,8 @@ func (ss *stateStatistics) Persister(epoch uint32) uint64 { return ss.numPersister[epoch] } -// IncrSnapshotPersister will increment snapshot persister counter -func (ss *stateStatistics) IncrSnapshotPersister(epoch uint32) { +// IncrementSnapshotPersister will increment snapshot persister counter +func (ss *stateStatistics) IncrementSnapshotPersister(epoch uint32) { ss.mutPersisters.Lock() defer ss.mutPersisters.Unlock() @@ -103,8 +103,8 @@ func (ss *stateStatistics) SnapshotPersister(epoch uint32) uint64 { return ss.numSnapshotPersister[epoch] } -// IncrTrie will increment trie counter -func (ss *stateStatistics) IncrTrie() { +// 
IncrementTrie will increment trie counter +func (ss *stateStatistics) IncrementTrie() { atomic.AddUint64(&ss.numTrie, 1) } diff --git a/common/statistics/stateStatistics_test.go b/common/statistics/stateStatistics_test.go index e1beaf9d35b..674b3d8ea6b 100644 --- a/common/statistics/stateStatistics_test.go +++ b/common/statistics/stateStatistics_test.go @@ -27,11 +27,11 @@ func TestStateStatistics_Processing(t *testing.T) { assert.Equal(t, uint64(0), ss.Trie()) - ss.IncrTrie() - ss.IncrTrie() + ss.IncrementTrie() + ss.IncrementTrie() assert.Equal(t, uint64(2), ss.Trie()) - ss.IncrTrie() + ss.IncrementTrie() assert.Equal(t, uint64(3), ss.Trie()) ss.Reset() @@ -47,11 +47,11 @@ func TestStateStatistics_Processing(t *testing.T) { assert.Equal(t, uint64(0), ss.Persister(epoch)) - ss.IncrPersister(epoch) - ss.IncrPersister(epoch) + ss.IncrementPersister(epoch) + ss.IncrementPersister(epoch) assert.Equal(t, uint64(2), ss.Persister(epoch)) - ss.IncrPersister(epoch) + ss.IncrementPersister(epoch) assert.Equal(t, uint64(3), ss.Persister(epoch)) ss.Reset() @@ -65,11 +65,11 @@ func TestStateStatistics_Processing(t *testing.T) { assert.Equal(t, uint64(0), ss.Cache()) - ss.IncrCache() - ss.IncrCache() + ss.IncrementCache() + ss.IncrementCache() assert.Equal(t, uint64(2), ss.Cache()) - ss.IncrCache() + ss.IncrementCache() assert.Equal(t, uint64(3), ss.Cache()) ss.Reset() @@ -89,11 +89,11 @@ func TestStateStatistics_Snapshot(t *testing.T) { assert.Equal(t, uint64(0), ss.SnapshotPersister(epoch)) - ss.IncrSnapshotPersister(epoch) - ss.IncrSnapshotPersister(epoch) + ss.IncrementSnapshotPersister(epoch) + ss.IncrementSnapshotPersister(epoch) assert.Equal(t, uint64(2), ss.SnapshotPersister(epoch)) - ss.IncrSnapshotPersister(epoch) + ss.IncrementSnapshotPersister(epoch) assert.Equal(t, uint64(3), ss.SnapshotPersister(epoch)) ss.ResetSnapshot() @@ -107,11 +107,11 @@ func TestStateStatistics_Snapshot(t *testing.T) { assert.Equal(t, uint64(0), ss.Cache()) - ss.IncrSnapshotCache() - ss.IncrSnapshotCache() + ss.IncrementSnapshotCache() + ss.IncrementSnapshotCache() assert.Equal(t, uint64(2), ss.SnapshotCache()) - ss.IncrSnapshotCache() + ss.IncrementSnapshotCache() assert.Equal(t, uint64(3), ss.SnapshotCache()) ss.ResetSnapshot() @@ -144,11 +144,11 @@ func TestStateStatistics_ConcurrenyOperations(t *testing.T) { case 0: ss.Reset() case 1: - ss.IncrCache() + ss.IncrementCache() case 2: - ss.IncrPersister(epoch) + ss.IncrementPersister(epoch) case 3: - ss.IncrTrie() + ss.IncrementTrie() case 7: _ = ss.Cache() case 8: diff --git a/common/validatorInfo/validatorInfoUtils.go b/common/validatorInfo/validatorInfoUtils.go index e6cf36ba52a..20f4e97897a 100644 --- a/common/validatorInfo/validatorInfoUtils.go +++ b/common/validatorInfo/validatorInfoUtils.go @@ -6,41 +6,41 @@ import ( ) // WasActiveInCurrentEpoch returns true if the node was active in current epoch -func WasActiveInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +func WasActiveInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - active := valInfo.LeaderFailure > 0 || valInfo.LeaderSuccess > 0 || valInfo.ValidatorSuccess > 0 || valInfo.ValidatorFailure > 0 + active := valInfo.GetLeaderFailure() > 0 || valInfo.GetLeaderSuccess() > 0 || valInfo.GetValidatorSuccess() > 0 || valInfo.GetValidatorFailure() > 0 return active } // WasLeavingEligibleInCurrentEpoch returns true if the validator was eligible in the epoch but has done an unstake. 
-func WasLeavingEligibleInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +func WasLeavingEligibleInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - return valInfo.List == string(common.LeavingList) && WasActiveInCurrentEpoch(valInfo) + return valInfo.GetList() == string(common.LeavingList) && WasActiveInCurrentEpoch(valInfo) } // WasJailedEligibleInCurrentEpoch returns true if the validator was jailed in the epoch but also active/eligible due to not enough -//nodes in shard. -func WasJailedEligibleInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +// nodes in shard. +func WasJailedEligibleInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - return valInfo.List == string(common.JailedList) && WasActiveInCurrentEpoch(valInfo) + return valInfo.GetList() == string(common.JailedList) && WasActiveInCurrentEpoch(valInfo) } // WasEligibleInCurrentEpoch returns true if the validator was eligible for consensus in current epoch -func WasEligibleInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +func WasEligibleInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - wasEligibleInShard := valInfo.List == string(common.EligibleList) || + wasEligibleInShard := valInfo.GetList() == string(common.EligibleList) || WasLeavingEligibleInCurrentEpoch(valInfo) || WasJailedEligibleInCurrentEpoch(valInfo) diff --git a/config/config.go b/config/config.go index 5c489635269..412ab59f776 100644 --- a/config/config.go +++ b/config/config.go @@ -88,12 +88,14 @@ type EvictionWaitingListConfig struct { // EpochStartConfig will hold the configuration of EpochStart settings type EpochStartConfig struct { - MinRoundsBetweenEpochs int64 - RoundsPerEpoch int64 - MinShuffledOutRestartThreshold float64 - MaxShuffledOutRestartThreshold float64 - MinNumConnectedPeersToStart int - MinNumOfPeersToConsiderBlockValid int + MinRoundsBetweenEpochs int64 + RoundsPerEpoch int64 + MinShuffledOutRestartThreshold float64 + MaxShuffledOutRestartThreshold float64 + MinNumConnectedPeersToStart int + MinNumOfPeersToConsiderBlockValid int + ExtraDelayForRequestBlockInfoInMilliseconds int + GenesisEpoch uint32 } // BlockSizeThrottleConfig will hold the configuration for adaptive block size throttle @@ -193,15 +195,16 @@ type Config struct { PublicKeyPIDSignature CacheConfig PeerHonesty CacheConfig - Antiflood AntifloodConfig - WebServerAntiflood WebServerAntifloodConfig - ResourceStats ResourceStatsConfig - HeartbeatV2 HeartbeatV2Config - ValidatorStatistics ValidatorStatisticsConfig - GeneralSettings GeneralSettingsConfig - Consensus ConsensusConfig - StoragePruning StoragePruningConfig - LogsAndEvents LogsAndEventsConfig + Antiflood AntifloodConfig + WebServerAntiflood WebServerAntifloodConfig + ResourceStats ResourceStatsConfig + HeartbeatV2 HeartbeatV2Config + ValidatorStatistics ValidatorStatisticsConfig + GeneralSettings GeneralSettingsConfig + Consensus ConsensusConfig + StoragePruning StoragePruningConfig + LogsAndEvents LogsAndEventsConfig + HardwareRequirements HardwareRequirementsConfig NTPConfig NTPConfig HeadersPoolConfig HeadersPoolConfig @@ -225,6 +228,8 @@ type Config struct { PeersRatingConfig PeersRatingConfig PoolsCleanersConfig PoolsCleanersConfig Redundancy RedundancyConfig + + RelayedTransactionConfig RelayedTransactionConfig } // PeersRatingConfig will hold settings related to peers rating @@ -289,6 +294,11 @@ type GeneralSettingsConfig struct { SetGuardianEpochsDelay uint32 } +// 
HardwareRequirementsConfig will hold the hardware requirements config +type HardwareRequirementsConfig struct { + CPUFlags []string +} + // FacadeConfig will hold different configuration option that will be passed to the node facade type FacadeConfig struct { RestApiInterface string @@ -359,15 +369,16 @@ type TxAccumulatorConfig struct { // AntifloodConfig will hold all p2p antiflood parameters type AntifloodConfig struct { - Enabled bool - NumConcurrentResolverJobs int32 - OutOfSpecs FloodPreventerConfig - FastReacting FloodPreventerConfig - SlowReacting FloodPreventerConfig - PeerMaxOutput AntifloodLimitsConfig - Cache CacheConfig - Topic TopicAntifloodConfig - TxAccumulator TxAccumulatorConfig + Enabled bool + NumConcurrentResolverJobs int32 + NumConcurrentResolvingTrieNodesJobs int32 + OutOfSpecs FloodPreventerConfig + FastReacting FloodPreventerConfig + SlowReacting FloodPreventerConfig + PeerMaxOutput AntifloodLimitsConfig + Cache CacheConfig + Topic TopicAntifloodConfig + TxAccumulator TxAccumulatorConfig } // FloodPreventerConfig will hold all flood preventer parameters @@ -404,6 +415,7 @@ type VirtualMachineConfig struct { WasmVMVersions []WasmVMVersionByEpoch TimeOutForSCExecutionInMilliseconds uint32 WasmerSIGSEGVPassthrough bool + TransferAndExecuteByUserAddresses []string } // WasmVMVersionByEpoch represents the Wasm VM version to be used starting with an epoch @@ -630,3 +642,8 @@ type PoolsCleanersConfig struct { type RedundancyConfig struct { MaxRoundsOfInactivityAccepted int } + +// RelayedTransactionConfig represents the config options to be used for relayed transactions +type RelayedTransactionConfig struct { + MaxTransactionsAllowed int +} diff --git a/config/configChecker.go b/config/configChecker.go new file mode 100644 index 00000000000..11ddc7eff9a --- /dev/null +++ b/config/configChecker.go @@ -0,0 +1,103 @@ +package config + +import ( + "fmt" + + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("config-checker") + +// SanityCheckNodesConfig checks if the nodes limit setup is set correctly +func SanityCheckNodesConfig( + nodesSetup NodesSetupHandler, + cfg EnableEpochs, +) error { + maxNodesChange := cfg.MaxNodesChangeEnableEpoch + for _, maxNodesConfig := range maxNodesChange { + err := checkMaxNodesConfig(nodesSetup, maxNodesConfig) + if err != nil { + return fmt.Errorf("%w in MaxNodesChangeConfig at EpochEnable = %d", err, maxNodesConfig.EpochEnable) + } + } + + return sanityCheckEnableEpochsStakingV4(cfg, nodesSetup.NumberOfShards()) +} + +func checkMaxNodesConfig( + nodesSetup NodesSetupHandler, + maxNodesConfig MaxNodesChangeConfig, +) error { + maxNumNodes := maxNodesConfig.MaxNumNodes + minNumNodesWithHysteresis := nodesSetup.MinNumberOfNodesWithHysteresis() + if maxNumNodes < minNumNodesWithHysteresis { + return fmt.Errorf("%w, maxNumNodes: %d, minNumNodesWithHysteresis: %d", + errInvalidMaxMinNodes, maxNumNodes, minNumNodesWithHysteresis) + } + + return nil +} + +// sanityCheckEnableEpochsStakingV4 checks if the enable epoch configs for stakingV4 are set correctly +func sanityCheckEnableEpochsStakingV4(enableEpochsCfg EnableEpochs, numOfShards uint32) error { + if !areStakingV4StepsInOrder(enableEpochsCfg) { + return errStakingV4StepsNotInOrder + } + + return checkStakingV4MaxNodesChangeCfg(enableEpochsCfg, numOfShards) +} + +func areStakingV4StepsInOrder(enableEpochsCfg EnableEpochs) bool { + return (enableEpochsCfg.StakingV4Step1EnableEpoch == enableEpochsCfg.StakingV4Step2EnableEpoch-1) && + 
(enableEpochsCfg.StakingV4Step2EnableEpoch == enableEpochsCfg.StakingV4Step3EnableEpoch-1) +} + +func checkStakingV4MaxNodesChangeCfg(enableEpochsCfg EnableEpochs, numOfShards uint32) error { + maxNodesChangeCfg := enableEpochsCfg.MaxNodesChangeEnableEpoch + if len(maxNodesChangeCfg) <= 1 { + return nil + } + + maxNodesConfigAdaptedForStakingV4 := false + + for idx, currMaxNodesChangeCfg := range maxNodesChangeCfg { + if currMaxNodesChangeCfg.EpochEnable == enableEpochsCfg.StakingV4Step3EnableEpoch { + maxNodesConfigAdaptedForStakingV4 = true + + if idx == 0 { + log.Warn(fmt.Errorf("found config change in MaxNodesChangeEnableEpoch for StakingV4Step3EnableEpoch = %d, but %w ", + enableEpochsCfg.StakingV4Step3EnableEpoch, errNoMaxNodesConfigBeforeStakingV4).Error()) + break + } + + prevMaxNodesChange := maxNodesChangeCfg[idx-1] + err := checkMaxNodesChangedCorrectly(prevMaxNodesChange, currMaxNodesChangeCfg, numOfShards) + if err != nil { + return err + } + + break + } + } + + if !maxNodesConfigAdaptedForStakingV4 { + return fmt.Errorf("%w = %d", errNoMaxNodesConfigChangeForStakingV4, enableEpochsCfg.StakingV4Step3EnableEpoch) + } + + return nil +} + +func checkMaxNodesChangedCorrectly(prevMaxNodesChange MaxNodesChangeConfig, currMaxNodesChange MaxNodesChangeConfig, numOfShards uint32) error { + if prevMaxNodesChange.NodesToShufflePerShard != currMaxNodesChange.NodesToShufflePerShard { + return errMismatchNodesToShuffle + } + + totalShuffled := (numOfShards + 1) * prevMaxNodesChange.NodesToShufflePerShard + expectedMaxNumNodes := prevMaxNodesChange.MaxNumNodes - totalShuffled + if expectedMaxNumNodes != currMaxNodesChange.MaxNumNodes { + return fmt.Errorf("expected MaxNodesChangeEnableEpoch.MaxNumNodes for StakingV4Step3EnableEpoch = %d, but got %d", + expectedMaxNumNodes, currMaxNodesChange.MaxNumNodes) + } + + return nil +} diff --git a/config/configChecker_test.go b/config/configChecker_test.go new file mode 100644 index 00000000000..ec993631fbb --- /dev/null +++ b/config/configChecker_test.go @@ -0,0 +1,382 @@ +package config + +import ( + "strings" + "testing" + + "github.com/multiversx/mx-chain-go/testscommon/nodesSetupMock" + "github.com/stretchr/testify/require" +) + +const numOfShards = 3 + +func generateCorrectConfig() EnableEpochs { + return EnableEpochs{ + StakingV4Step1EnableEpoch: 4, + StakingV4Step2EnableEpoch: 5, + StakingV4Step3EnableEpoch: 6, + MaxNodesChangeEnableEpoch: []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + }, + } +} + +func TestSanityCheckEnableEpochsStakingV4(t *testing.T) { + t.Parallel() + + t.Run("correct config, should work", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Nil(t, err) + }) + + t.Run("staking v4 steps not in ascending order, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.StakingV4Step1EnableEpoch = 5 + cfg.StakingV4Step2EnableEpoch = 5 + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + + cfg = generateCorrectConfig() + cfg.StakingV4Step2EnableEpoch = 5 + cfg.StakingV4Step3EnableEpoch = 4 + err = sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + }) + 
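checkMaxNodesChangedCorrectly above encodes a simple arithmetic invariant: when staking v4 step 3 activates, MaxNumNodes must shrink by exactly one shuffled-out group per shard plus one for the metachain. A short, runnable illustration using the numbers from the generateCorrectConfig fixture above (3 shards, previous entry with MaxNumNodes = 56 and NodesToShufflePerShard = 2); the figures come from the test fixture, the snippet itself is only illustrative:

package main

import "fmt"

func main() {
	numOfShards := uint32(3)
	prevMaxNumNodes := uint32(56)       // fixture entry with EpochEnable = 1
	nodesToShufflePerShard := uint32(2)

	// One shuffled-out group per shard, plus one for the metachain.
	totalShuffled := (numOfShards + 1) * nodesToShufflePerShard // 8
	expectedMaxNumNodes := prevMaxNumNodes - totalShuffled      // 48

	// 48 matches the MaxNumNodes configured for EpochEnable = StakingV4Step3EnableEpoch (6),
	// so sanityCheckEnableEpochsStakingV4 accepts this configuration.
	fmt.Println(totalShuffled, expectedMaxNumNodes)
}

The hysteresis check in checkMaxNodesConfig follows the same spirit: in the failing TestSanityCheckNodesConfig case further below, 400 metachain plus 3×400 shard nodes with a 0.2 hysteresis presumably yield the asserted minNumNodesWithHysteresis of 1.2·400 + 3·1.2·400 = 1920, which exceeds the configured MaxNumNodes of 1900.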
+ t.Run("staking v4 steps not in cardinal order, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + + cfg.StakingV4Step1EnableEpoch = 1 + cfg.StakingV4Step2EnableEpoch = 3 + cfg.StakingV4Step3EnableEpoch = 6 + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + + cfg.StakingV4Step1EnableEpoch = 1 + cfg.StakingV4Step2EnableEpoch = 2 + cfg.StakingV4Step3EnableEpoch = 6 + err = sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + + cfg.StakingV4Step1EnableEpoch = 1 + cfg.StakingV4Step2EnableEpoch = 5 + cfg.StakingV4Step3EnableEpoch = 6 + err = sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + }) + + t.Run("no previous config for max nodes change with one entry, should not return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Nil(t, err) + }) + + t.Run("no max nodes config change for StakingV4Step3EnableEpoch, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 444, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), errNoMaxNodesConfigChangeForStakingV4.Error())) + require.True(t, strings.Contains(err.Error(), "6")) + }) + + t.Run("max nodes config change for StakingV4Step3EnableEpoch has no previous config change, should not error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: cfg.StakingV4Step3EnableEpoch, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 444, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + } + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Nil(t, err) + }) + + t.Run("stakingV4 config for max nodes changed with different nodes to shuffle, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch[1].NodesToShufflePerShard = 2 + cfg.MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard = 4 + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.ErrorIs(t, err, errMismatchNodesToShuffle) + }) + + t.Run("stakingV4 config for max nodes changed with wrong max num nodes, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch[2].MaxNumNodes = 56 + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "expected")) + require.True(t, strings.Contains(err.Error(), "48")) + require.True(t, strings.Contains(err.Error(), "got")) + require.True(t, strings.Contains(err.Error(), "56")) + }) +} + +func TestSanityCheckNodesConfig(t *testing.T) { + t.Parallel() + + numShards := uint32(3) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + nodesSetup := &nodesSetupMock.NodesSetupMock{ + 
NumberOfShardsField: numShards, + HysteresisField: 0, + MinNumberOfMetaNodesField: 5, + MinNumberOfShardNodesField: 5, + } + err := SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 1, + MaxNumNodes: 3200, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 2, + MaxNumNodes: 2880, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 3, + MaxNumNodes: 2240, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 4, + MaxNumNodes: 2240, + NodesToShufflePerShard: 40, + }, + { + EpochEnable: 6, + MaxNumNodes: 2080, + NodesToShufflePerShard: 40, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0, + MinNumberOfMetaNodesField: 3, + MinNumberOfShardNodesField: 3, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 7, + MinNumberOfShardNodesField: 7, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 48, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 10, + MinNumberOfShardNodesField: 10, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 2169, + NodesToShufflePerShard: 143, + }, + { + EpochEnable: 1, + MaxNumNodes: 3200, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 6, + MaxNumNodes: 2880, + NodesToShufflePerShard: 80, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + }) + + t.Run("zero nodes to shuffle per shard, should not return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 4, + MaxNumNodes: 3200, + NodesToShufflePerShard: 0, + }, + { + EpochEnable: 6, + MaxNumNodes: 3200, + NodesToShufflePerShard: 0, + }, + } + nodesSetup := &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + 
MinNumberOfShardNodesField: 400, + } + err := SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + }) + + t.Run("maxNumNodes < minNumNodesWithHysteresis, should return error ", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 4, + MaxNumNodes: 1900, + NodesToShufflePerShard: 80, + }, + } + nodesSetup := &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err := SanityCheckNodesConfig(nodesSetup, cfg) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), errInvalidMaxMinNodes.Error())) + require.True(t, strings.Contains(err.Error(), "maxNumNodes: 1900")) + require.True(t, strings.Contains(err.Error(), "minNumNodesWithHysteresis: 1920")) + }) +} diff --git a/config/epochConfig.go b/config/epochConfig.go index 92730e246a9..2aaa96426d9 100644 --- a/config/epochConfig.go +++ b/config/epochConfig.go @@ -45,13 +45,11 @@ type EnableEpochs struct { SaveJailedAlwaysEnableEpoch uint32 ValidatorToDelegationEnableEpoch uint32 ReDelegateBelowMinCheckEnableEpoch uint32 - WaitingListFixEnableEpoch uint32 IncrementSCRNonceInMultiTransferEnableEpoch uint32 ScheduledMiniBlocksEnableEpoch uint32 ESDTMultiTransferEnableEpoch uint32 GlobalMintBurnDisableEpoch uint32 ESDTTransferRoleEnableEpoch uint32 - BuiltInFunctionOnMetaEnableEpoch uint32 ComputeRewardCheckpointEnableEpoch uint32 SCRSizeInvariantCheckEnableEpoch uint32 BackwardCompSaveKeyValueEnableEpoch uint32 @@ -104,6 +102,7 @@ type EnableEpochs struct { MultiClaimOnDelegationEnableEpoch uint32 ChangeUsernameEnableEpoch uint32 AutoBalanceDataTriesEnableEpoch uint32 + MigrateDataTrieEnableEpoch uint32 ConsistentTokensValuesLengthCheckEnableEpoch uint32 FixDelegationChangeOwnerOnAccountEnableEpoch uint32 DynamicGasCostForDataTrieStorageLoadEnableEpoch uint32 @@ -111,6 +110,19 @@ type EnableEpochs struct { ChangeOwnerAddressCrossShardThroughSCEnableEpoch uint32 FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch uint32 CurrentRandomnessOnSortingEnableEpoch uint32 + StakeLimitsEnableEpoch uint32 + StakingV4Step1EnableEpoch uint32 + StakingV4Step2EnableEpoch uint32 + StakingV4Step3EnableEpoch uint32 + CleanupAuctionOnLowWaitingListEnableEpoch uint32 + AlwaysMergeContextsInEEIEnableEpoch uint32 + DynamicESDTEnableEpoch uint32 + EGLDInMultiTransferEnableEpoch uint32 + CryptoOpcodesV2EnableEpoch uint32 + UnJailCleanupEnableEpoch uint32 + RelayedTransactionsV3EnableEpoch uint32 + FixRelayedBaseCostEnableEpoch uint32 + MultiESDTNFTTransferAndExecuteByUserEnableEpoch uint32 BLSMultiSignerEnableEpoch []MultiSignerConfig } diff --git a/config/errors.go b/config/errors.go new file mode 100644 index 00000000000..6161ef4c168 --- /dev/null +++ b/config/errors.go @@ -0,0 +1,13 @@ +package config + +import "errors" + +var errStakingV4StepsNotInOrder = errors.New("staking v4 enable epoch steps should be in cardinal order(e.g.: StakingV4Step1EnableEpoch = 2, StakingV4Step2EnableEpoch = 3, StakingV4Step3EnableEpoch = 4)") + +var errNoMaxNodesConfigBeforeStakingV4 = errors.New("no previous config change entry in MaxNodesChangeEnableEpoch before entry with EpochEnable = StakingV4Step3EnableEpoch") + +var errMismatchNodesToShuffle = errors.New("previous MaxNodesChangeEnableEpoch.NodesToShufflePerShard != MaxNodesChangeEnableEpoch.NodesToShufflePerShard with EnableEpoch = StakingV4Step3EnableEpoch") + +var 
errNoMaxNodesConfigChangeForStakingV4 = errors.New("no MaxNodesChangeEnableEpoch config found for EpochEnable = StakingV4Step3EnableEpoch") + +var errInvalidMaxMinNodes = errors.New("number of min nodes with hysteresis > number of max nodes") diff --git a/config/interface.go b/config/interface.go new file mode 100644 index 00000000000..859e845c434 --- /dev/null +++ b/config/interface.go @@ -0,0 +1,7 @@ +package config + +// NodesSetupHandler provides nodes setup information +type NodesSetupHandler interface { + MinNumberOfNodesWithHysteresis() uint32 + NumberOfShards() uint32 +} diff --git a/config/overridableConfig/configOverriding.go b/config/overridableConfig/configOverriding.go index 7e9f3a153de..84b823738fe 100644 --- a/config/overridableConfig/configOverriding.go +++ b/config/overridableConfig/configOverriding.go @@ -10,16 +10,32 @@ import ( ) const ( + apiTomlFile = "api.toml" configTomlFile = "config.toml" + economicsTomlFile = "economics.toml" enableEpochsTomlFile = "enableEpochs.toml" - p2pTomlFile = "p2p.toml" - fullArchiveP2PTomlFile = "fullArchiveP2P.toml" + enableRoundsTomlFile = "enableRounds.toml" externalTomlFile = "external.toml" + fullArchiveP2PTomlFile = "fullArchiveP2P.toml" + p2pTomlFile = "p2p.toml" + ratingsTomlFile = "ratings.toml" + systemSCTomlFile = "systemSmartContractsConfig.toml" ) var ( - availableConfigFilesForOverriding = []string{configTomlFile, enableEpochsTomlFile, p2pTomlFile, externalTomlFile} - log = logger.GetOrCreate("config") + availableConfigFilesForOverriding = []string{ + apiTomlFile, + configTomlFile, + economicsTomlFile, + enableEpochsTomlFile, + enableRoundsTomlFile, + externalTomlFile, + fullArchiveP2PTomlFile, + p2pTomlFile, + ratingsTomlFile, + systemSCTomlFile, + } + log = logger.GetOrCreate("config") ) // OverrideConfigValues will override config values for the specified configurations @@ -27,16 +43,27 @@ func OverrideConfigValues(newConfigs []config.OverridableConfig, configs *config var err error for _, newConfig := range newConfigs { switch newConfig.File { + case apiTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.ApiRoutesConfig, newConfig.Path, newConfig.Value) case configTomlFile: err = reflectcommon.AdaptStructureValueBasedOnPath(configs.GeneralConfig, newConfig.Path, newConfig.Value) + case economicsTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.EconomicsConfig, newConfig.Path, newConfig.Value) case enableEpochsTomlFile: err = reflectcommon.AdaptStructureValueBasedOnPath(configs.EpochConfig, newConfig.Path, newConfig.Value) - case p2pTomlFile: - err = reflectcommon.AdaptStructureValueBasedOnPath(configs.MainP2pConfig, newConfig.Path, newConfig.Value) - case fullArchiveP2PTomlFile: - err = reflectcommon.AdaptStructureValueBasedOnPath(configs.FullArchiveP2pConfig, newConfig.Path, newConfig.Value) + case enableRoundsTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.RoundConfig, newConfig.Path, newConfig.Value) case externalTomlFile: err = reflectcommon.AdaptStructureValueBasedOnPath(configs.ExternalConfig, newConfig.Path, newConfig.Value) + case fullArchiveP2PTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.FullArchiveP2pConfig, newConfig.Path, newConfig.Value) + case p2pTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.MainP2pConfig, newConfig.Path, newConfig.Value) + case ratingsTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.RatingsConfig, newConfig.Path, newConfig.Value) + case systemSCTomlFile: 
+ err = reflectcommon.AdaptStructureValueBasedOnPath(configs.SystemSCConfig, newConfig.Path, newConfig.Value) + default: err = fmt.Errorf("invalid config file <%s>. Available options are %s", newConfig.File, strings.Join(availableConfigFilesForOverriding, ",")) } diff --git a/config/overridableConfig/configOverriding_test.go b/config/overridableConfig/configOverriding_test.go index b15cf8e5c5c..9f187a8a501 100644 --- a/config/overridableConfig/configOverriding_test.go +++ b/config/overridableConfig/configOverriding_test.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-go/config" p2pConfig "github.com/multiversx/mx-chain-go/p2p/config" + "github.com/stretchr/testify/require" ) @@ -22,7 +23,8 @@ func TestOverrideConfigValues(t *testing.T) { t.Parallel() err := OverrideConfigValues([]config.OverridableConfig{{File: "invalid.toml"}}, &config.Configs{}) - require.Equal(t, "invalid config file . Available options are config.toml,enableEpochs.toml,p2p.toml,external.toml", err.Error()) + availableOptionsString := "api.toml,config.toml,economics.toml,enableEpochs.toml,enableRounds.toml,external.toml,fullArchiveP2P.toml,p2p.toml,ratings.toml,systemSmartContractsConfig.toml" + require.Equal(t, "invalid config file . Available options are "+availableOptionsString, err.Error()) }) t.Run("nil config, should error", func(t *testing.T) { @@ -47,7 +49,7 @@ func TestOverrideConfigValues(t *testing.T) { configs := &config.Configs{MainP2pConfig: &p2pConfig.P2PConfig{Sharding: p2pConfig.ShardingConfig{TargetPeerCount: 5}}} - err := OverrideConfigValues([]config.OverridableConfig{{Path: "Sharding.TargetPeerCount", Value: "37", File: "p2p.toml"}}, configs) + err := OverrideConfigValues([]config.OverridableConfig{{Path: "Sharding.TargetPeerCount", Value: uint32(37), File: "p2p.toml"}}, configs) require.NoError(t, err) require.Equal(t, uint32(37), configs.MainP2pConfig.Sharding.TargetPeerCount) }) @@ -57,7 +59,7 @@ func TestOverrideConfigValues(t *testing.T) { configs := &config.Configs{FullArchiveP2pConfig: &p2pConfig.P2PConfig{Sharding: p2pConfig.ShardingConfig{TargetPeerCount: 5}}} - err := OverrideConfigValues([]config.OverridableConfig{{Path: "Sharding.TargetPeerCount", Value: "37", File: "fullArchiveP2P.toml"}}, configs) + err := OverrideConfigValues([]config.OverridableConfig{{Path: "Sharding.TargetPeerCount", Value: uint32(37), File: "fullArchiveP2P.toml"}}, configs) require.NoError(t, err) require.Equal(t, uint32(37), configs.FullArchiveP2pConfig.Sharding.TargetPeerCount) }) @@ -77,8 +79,88 @@ func TestOverrideConfigValues(t *testing.T) { configs := &config.Configs{EpochConfig: &config.EpochConfig{EnableEpochs: config.EnableEpochs{ESDTMetadataContinuousCleanupEnableEpoch: 5}}} - err := OverrideConfigValues([]config.OverridableConfig{{Path: "EnableEpochs.ESDTMetadataContinuousCleanupEnableEpoch", Value: "37", File: "enableEpochs.toml"}}, configs) + err := OverrideConfigValues([]config.OverridableConfig{{Path: "EnableEpochs.ESDTMetadataContinuousCleanupEnableEpoch", Value: uint32(37), File: "enableEpochs.toml"}}, configs) require.NoError(t, err) require.Equal(t, uint32(37), configs.EpochConfig.EnableEpochs.ESDTMetadataContinuousCleanupEnableEpoch) }) + + t.Run("should work for api.toml", func(t *testing.T) { + t.Parallel() + + configs := &config.Configs{ApiRoutesConfig: &config.ApiRoutesConfig{}} + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "Logging.LoggingEnabled", Value: true, File: "api.toml"}}, configs) + 
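With the expanded switch above, every major TOML file can be targeted, and since OverridableConfig.Value becomes an interface{} (see the prefsConfig.go hunk further below), overrides now carry typed values rather than strings. A minimal sketch of a caller, mirroring the ratings.toml test in this file and assuming the repository's module path for the overridableConfig package; the concrete entry is illustrative only:

package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/config"
	"github.com/multiversx/mx-chain-go/config/overridableConfig"
)

func main() {
	configs := &config.Configs{RatingsConfig: &config.RatingsConfig{}}
	overrides := []config.OverridableConfig{
		{File: "ratings.toml", Path: "General.StartRating", Value: 37},
	}

	// An unknown file name or a value that cannot be cast to the target field's type
	// is reported as an error.
	if err := overridableConfig.OverrideConfigValues(overrides, configs); err != nil {
		fmt.Println("could not apply overrides:", err)
		return
	}

	fmt.Println(configs.RatingsConfig.General.StartRating) // 37
}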
require.NoError(t, err) + require.True(t, configs.ApiRoutesConfig.Logging.LoggingEnabled) + }) + + t.Run("should work for economics.toml", func(t *testing.T) { + t.Parallel() + + configs := &config.Configs{EconomicsConfig: &config.EconomicsConfig{}} + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "GlobalSettings.GenesisTotalSupply", Value: "37", File: "economics.toml"}}, configs) + require.NoError(t, err) + require.Equal(t, "37", configs.EconomicsConfig.GlobalSettings.GenesisTotalSupply) + }) + + t.Run("should work for enableRounds.toml", func(t *testing.T) { + t.Parallel() + + configs := &config.Configs{RoundConfig: &config.RoundConfig{}} + value := make(map[string]config.ActivationRoundByName) + value["DisableAsyncCallV1"] = config.ActivationRoundByName{Round: "37"} + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "RoundActivations", Value: value, File: "enableRounds.toml"}}, configs) + require.NoError(t, err) + require.Equal(t, "37", configs.RoundConfig.RoundActivations["DisableAsyncCallV1"].Round) + }) + + t.Run("should work for ratings.toml", func(t *testing.T) { + t.Parallel() + + configs := &config.Configs{RatingsConfig: &config.RatingsConfig{}} + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "General.StartRating", Value: 37, File: "ratings.toml"}}, configs) + require.NoError(t, err) + require.Equal(t, uint32(37), configs.RatingsConfig.General.StartRating) + }) + + t.Run("should work for systemSmartContractsConfig.toml", func(t *testing.T) { + t.Parallel() + + configs := &config.Configs{SystemSCConfig: &config.SystemSmartContractsConfig{}} + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "StakingSystemSCConfig.UnBondPeriod", Value: 37, File: "systemSmartContractsConfig.toml"}}, configs) + require.NoError(t, err) + require.Equal(t, uint64(37), configs.SystemSCConfig.StakingSystemSCConfig.UnBondPeriod) + }) + + t.Run("should work for go struct overwrite", func(t *testing.T) { + t.Parallel() + + configs := &config.Configs{ + GeneralConfig: &config.Config{ + VirtualMachine: config.VirtualMachineServicesConfig{ + Execution: config.VirtualMachineConfig{ + WasmVMVersions: []config.WasmVMVersionByEpoch{ + {StartEpoch: 0, Version: "1.3"}, + {StartEpoch: 1, Version: "1.4"}, + {StartEpoch: 2, Version: "1.5"}, + }, + }, + }, + }, + } + require.Equal(t, len(configs.GeneralConfig.VirtualMachine.Execution.WasmVMVersions), 3) + + newWasmVMVersion := []config.WasmVMVersionByEpoch{ + {StartEpoch: 0, Version: "1.5"}, + } + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "VirtualMachine.Execution.WasmVMVersions", Value: newWasmVMVersion, File: "config.toml"}}, configs) + require.NoError(t, err) + require.Equal(t, len(configs.GeneralConfig.VirtualMachine.Execution.WasmVMVersions), 1) + require.Equal(t, newWasmVMVersion, configs.GeneralConfig.VirtualMachine.Execution.WasmVMVersions) + }) } diff --git a/config/prefsConfig.go b/config/prefsConfig.go index 34861d647e8..2659e592364 100644 --- a/config/prefsConfig.go +++ b/config/prefsConfig.go @@ -23,7 +23,7 @@ type PreferencesConfig struct { type OverridableConfig struct { File string Path string - Value string + Value interface{} } // BlockProcessingCutoffConfig holds the configuration for the block processing cutoff diff --git a/config/ratingsConfig.go b/config/ratingsConfig.go index 3558a32f446..a4c243cd51b 100644 --- a/config/ratingsConfig.go +++ b/config/ratingsConfig.go @@ -27,7 +27,7 @@ type MetaChain struct { RatingSteps } -//RatingValue will hold different rating 
options with increase and decrease steps +// RatingValue will hold different rating options with increase and decrease steps type RatingValue struct { Name string Value int32 diff --git a/config/systemSmartContractsConfig.go b/config/systemSmartContractsConfig.go index ef13d808351..9ce67ce3208 100644 --- a/config/systemSmartContractsConfig.go +++ b/config/systemSmartContractsConfig.go @@ -7,6 +7,7 @@ type SystemSmartContractsConfig struct { StakingSystemSCConfig StakingSystemSCConfig DelegationManagerSystemSCConfig DelegationManagerSystemSCConfig DelegationSystemSCConfig DelegationSystemSCConfig + SoftAuctionConfig SoftAuctionConfig } // StakingSystemSCConfig will hold the staking system smart contract settings @@ -23,6 +24,8 @@ type StakingSystemSCConfig struct { BleedPercentagePerRound float64 MaxNumberOfNodesForStake uint64 ActivateBLSPubKeyMessageVerification bool + StakeLimitPercentage float64 + NodeLimitPercentage float64 } // ESDTSystemSCConfig defines a set of constant to initialize the esdt system smart contract @@ -72,3 +75,11 @@ type DelegationSystemSCConfig struct { MaxServiceFee uint64 AddTokensWhitelistedAddress string } + +// SoftAuctionConfig represents the config options for soft auction selecting used in staking v4 +type SoftAuctionConfig struct { + TopUpStep string + MinTopUp string + MaxTopUp string + MaxNumberOfIterations uint64 +} diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 7d7b0912625..a6d1f29a4c3 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -5,10 +5,11 @@ import ( "strconv" "testing" - p2pConfig "github.com/multiversx/mx-chain-go/p2p/config" "github.com/pelletier/go-toml" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + p2pConfig "github.com/multiversx/mx-chain-go/p2p/config" ) func TestTomlParser(t *testing.T) { @@ -102,6 +103,10 @@ func TestTomlParser(t *testing.T) { WasmVMVersions: wasmVMVersions, TimeOutForSCExecutionInMilliseconds: 10000, WasmerSIGSEGVPassthrough: true, + TransferAndExecuteByUserAddresses: []string{ + "erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe0", + "erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe1", + "erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe2"}, }, Querying: QueryVirtualMachineConfig{ NumConcurrentVMs: 16, @@ -199,6 +204,11 @@ func TestTomlParser(t *testing.T) { { StartEpoch = 12, Version = "v0.3" }, { StartEpoch = 88, Version = "v1.2" }, ] + TransferAndExecuteByUserAddresses = [ + "erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe0", #shard 0 + "erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe1", #shard 1 + "erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe2", #shard 2 + ] [VirtualMachine.Querying] NumConcurrentVMs = 16 @@ -472,7 +482,8 @@ func TestAPIRoutesToml(t *testing.T) { func TestP2pConfig(t *testing.T) { initialPeersList := "/ip4/127.0.0.1/tcp/9999/p2p/16Uiu2HAkw5SNNtSvH1zJiQ6Gc3WoGNSxiyNueRKe6fuAuh57G3Bk" - protocolID := "test protocol id" + protocolID1 := "test protocol id 1" + protocolID2 := "test protocol id 2" shardingType := "ListSharder" port := "37373-38383" @@ -489,16 +500,23 @@ func TestP2pConfig(t *testing.T) { [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" PreventPortReuse = true - [Node.ResourceLimiter] - Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". 
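Not part of the diff: a short sketch of what changing OverridableConfig.Value from string to interface{} enables, reusing only the types shown above (config.OverridableConfig, config.WasmVMVersionByEpoch); the package name, function name and concrete values are illustrative.

package example

import "github.com/multiversx/mx-chain-go/config"

// buildOverrides shows overrides carrying typed values (a number and a slice of structs),
// which the switch of OverridableConfig.Value from string to interface{} makes possible.
func buildOverrides() []config.OverridableConfig {
	return []config.OverridableConfig{
		{File: "ratings.toml", Path: "General.StartRating", Value: 4000},
		{
			File:  "config.toml",
			Path:  "VirtualMachine.Execution.WasmVMVersions",
			Value: []config.WasmVMVersionByEpoch{{StartEpoch: 0, Version: "1.5"}},
		},
	}
}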
- ManualSystemMemoryInMB = 1 # not taken into account if the type is not "default with manual scale" - ManualMaximumFD = 2 # not taken into account if the type is not "default with manual scale" + + [Node.ResourceLimiter] + Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". + ManualSystemMemoryInMB = 1 # not taken into account if the type is not "default with manual scale" + ManualMaximumFD = 2 # not taken into account if the type is not "default with manual scale" [KadDhtPeerDiscovery] Enabled = false Type = "" RefreshIntervalInSec = 0 - ProtocolID = "` + protocolID + `" + + # ProtocolIDs represents the protocols that this node will advertise to other peers + # To connect to other nodes, those nodes should have at least one common protocol string + ProtocolIDs = [ + "` + protocolID1 + `", + "` + protocolID2 + `", + ] InitialPeerList = ["` + initialPeersList + `"] #kademlia's routing table bucket size @@ -536,7 +554,7 @@ func TestP2pConfig(t *testing.T) { }, }, KadDhtPeerDiscovery: p2pConfig.KadDhtPeerDiscoveryConfig{ - ProtocolID: protocolID, + ProtocolIDs: []string{protocolID1, protocolID2}, InitialPeerList: []string{initialPeersList}, }, Sharding: p2pConfig.ShardingConfig{ @@ -651,9 +669,6 @@ func TestEnableEpochConfig(t *testing.T) { # ValidatorToDelegationEnableEpoch represents the epoch when the validator-to-delegation feature will be enabled ValidatorToDelegationEnableEpoch = 29 - # WaitingListFixEnableEpoch represents the epoch when the 6 epoch waiting list fix is enabled - WaitingListFixEnableEpoch = 30 - # IncrementSCRNonceInMultiTransferEnableEpoch represents the epoch when the fix for preventing the generation of the same SCRs # is enabled. The fix is done by adding an extra increment. 
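Not part of the diff: the move from a single ProtocolID to a ProtocolIDs list, written as the equivalent Go struct literal that the test above asserts against; the protocol id strings here are illustrative placeholders, only the field names come from the change set.

package example

import p2pConfig "github.com/multiversx/mx-chain-go/p2p/config"

// kadDhtWithTwoProtocols mirrors the TOML above as a struct literal: discovery now advertises a
// list of protocol ids instead of a single one.
func kadDhtWithTwoProtocols() p2pConfig.KadDhtPeerDiscoveryConfig {
	return p2pConfig.KadDhtPeerDiscoveryConfig{
		ProtocolIDs: []string{"/erd/kad/1.0.0", "/erd/kad/1.1.0"},
		InitialPeerList: []string{
			"/ip4/127.0.0.1/tcp/9999/p2p/16Uiu2HAkw5SNNtSvH1zJiQ6Gc3WoGNSxiyNueRKe6fuAuh57G3Bk",
		},
	}
}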
IncrementSCRNonceInMultiTransferEnableEpoch = 31 @@ -667,9 +682,6 @@ func TestEnableEpochConfig(t *testing.T) { # ESDTTransferRoleEnableEpoch represents the epoch when esdt transfer role set is enabled ESDTTransferRoleEnableEpoch = 34 - # BuiltInFunctionOnMetaEnableEpoch represents the epoch when built in function processing on metachain is enabled - BuiltInFunctionOnMetaEnableEpoch = 35 - # ComputeRewardCheckpointEnableEpoch represents the epoch when compute rewards checkpoint epoch is enabled ComputeRewardCheckpointEnableEpoch = 36 @@ -844,9 +856,36 @@ func TestEnableEpochConfig(t *testing.T) { # FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch represents the epoch when the fix for the remaining gas in the SaveKeyValue builtin function is enabled FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = 91 + + # MigrateDataTrieEnableEpoch represents the epoch when the data tries migration is enabled + MigrateDataTrieEnableEpoch = 92 # CurrentRandomnessOnSortingEnableEpoch represents the epoch when the current randomness on sorting is enabled - CurrentRandomnessOnSortingEnableEpoch = 92 + CurrentRandomnessOnSortingEnableEpoch = 93 + + # AlwaysMergeContextsInEEIEnableEpoch represents the epoch in which the EEI will always merge the contexts + AlwaysMergeContextsInEEIEnableEpoch = 94 + + # CleanupAuctionOnLowWaitingListEnableEpoch represents the epoch when the cleanup auction on low waiting list is enabled + CleanupAuctionOnLowWaitingListEnableEpoch = 95 + + # DynamicESDTEnableEpoch represents the epoch when dynamic NFT feature is enabled + DynamicESDTEnableEpoch = 96 + + # EGLDInMultiTransferEnableEpoch represents the epoch when EGLD in MultiTransfer is enabled + EGLDInMultiTransferEnableEpoch = 97 + + # CryptoOpcodesV2EnableEpoch represents the epoch when BLSMultiSig, Secp256r1 and other opcodes are enabled + CryptoOpcodesV2EnableEpoch = 98 + + # RelayedTransactionsV3EnableEpoch represents the epoch when the relayed transactions V3 will be enabled + RelayedTransactionsV3EnableEpoch = 99 + + # FixRelayedBaseCostEnableEpoch represents the epoch when the fix for relayed base cost will be enabled + FixRelayedBaseCostEnableEpoch = 100 + + # MultiESDTNFTTransferAndExecuteByUserEnableEpoch represents the epoch when enshrined sovereign cross chain opcodes are enabled + MultiESDTNFTTransferAndExecuteByUserEnableEpoch = 101 # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ @@ -899,12 +938,10 @@ func TestEnableEpochConfig(t *testing.T) { SaveJailedAlwaysEnableEpoch: 27, ReDelegateBelowMinCheckEnableEpoch: 28, ValidatorToDelegationEnableEpoch: 29, - WaitingListFixEnableEpoch: 30, IncrementSCRNonceInMultiTransferEnableEpoch: 31, ESDTMultiTransferEnableEpoch: 32, GlobalMintBurnDisableEpoch: 33, ESDTTransferRoleEnableEpoch: 34, - BuiltInFunctionOnMetaEnableEpoch: 35, ComputeRewardCheckpointEnableEpoch: 36, SCRSizeInvariantCheckEnableEpoch: 37, BackwardCompSaveKeyValueEnableEpoch: 38, @@ -961,7 +998,16 @@ func TestEnableEpochConfig(t *testing.T) { NFTStopCreateEnableEpoch: 89, ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 90, FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 91, - CurrentRandomnessOnSortingEnableEpoch: 92, + MigrateDataTrieEnableEpoch: 92, + CurrentRandomnessOnSortingEnableEpoch: 93, + AlwaysMergeContextsInEEIEnableEpoch: 94, + CleanupAuctionOnLowWaitingListEnableEpoch: 95, + DynamicESDTEnableEpoch: 96, + EGLDInMultiTransferEnableEpoch: 97, + CryptoOpcodesV2EnableEpoch: 98, + 
RelayedTransactionsV3EnableEpoch: 99, + FixRelayedBaseCostEnableEpoch: 100, + MultiESDTNFTTransferAndExecuteByUserEnableEpoch: 101, MaxNodesChangeEnableEpoch: []MaxNodesChangeConfig{ { EpochEnable: 44, diff --git a/consensus/interface.go b/consensus/interface.go index 97292269a99..aa8d9057bc4 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -190,5 +190,6 @@ type KeysHandler interface { GetAssociatedPid(pkBytes []byte) core.PeerID IsOriginalPublicKeyOfTheNode(pkBytes []byte) bool ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) + GetRedundancyStepInReason() string IsInterfaceNil() bool } diff --git a/consensus/mock/peerProcessorStub.go b/consensus/mock/peerProcessorStub.go deleted file mode 100644 index 69e8b8d7d31..00000000000 --- a/consensus/mock/peerProcessorStub.go +++ /dev/null @@ -1,37 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/sharding" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - LoadInitialStateCalled func(in []*sharding.InitialNode) error - UpdatePeerStateCalled func(header, previousHeader data.HeaderHandler) error - IsInterfaceNilCalled func() bool -} - -// LoadInitialState - -func (pm *ValidatorStatisticsProcessorStub) LoadInitialState(in []*sharding.InitialNode) error { - if pm.LoadInitialStateCalled != nil { - return pm.LoadInitialStateCalled(in) - } - return nil -} - -// UpdatePeerState - -func (pm *ValidatorStatisticsProcessorStub) UpdatePeerState(header, previousHeader data.HeaderHandler) error { - if pm.UpdatePeerStateCalled != nil { - return pm.UpdatePeerStateCalled(header, previousHeader) - } - return nil -} - -// IsInterfaceNil - -func (pm *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - if pm.IsInterfaceNilCalled != nil { - return pm.IsInterfaceNilCalled() - } - return false -} diff --git a/consensus/spos/bls/blsSubroundsFactory.go b/consensus/spos/bls/blsSubroundsFactory.go index 81a09e71009..aeb64a5775a 100644 --- a/consensus/spos/bls/blsSubroundsFactory.go +++ b/consensus/spos/bls/blsSubroundsFactory.go @@ -80,7 +80,7 @@ func checkNewFactoryParams( return spos.ErrNilAppStatusHandler } if check.IfNil(sentSignaturesTracker) { - return spos.ErrNilSentSignatureTracker + return ErrNilSentSignatureTracker } if len(chainID) == 0 { return spos.ErrInvalidChainID diff --git a/consensus/spos/bls/blsSubroundsFactory_test.go b/consensus/spos/bls/blsSubroundsFactory_test.go index a0cf949d366..af3267a78cc 100644 --- a/consensus/spos/bls/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/blsSubroundsFactory_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/testscommon" testscommonOutport "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" @@ -76,7 +77,7 @@ func initFactoryWithContainer(container *mock.ConsensusCoreMock) bls.Factory { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return fct @@ -125,7 +126,7 @@ func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) { chainID, currentPid, 
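Not part of the diff: a sketch of how the new KeysHandler.GetRedundancyStepInReason method can be stubbed in tests, assuming the testscommon.KeysHandlerStub hook used further below in this change set; the constructor name is illustrative.

package example

import "github.com/multiversx/mx-chain-go/testscommon"

// newStepInKeysHandlerStub returns a KeysHandler stub that reports a fixed multikey step-in
// reason through the newly added GetRedundancyStepInReason method.
func newStepInKeysHandlerStub(reason string) *testscommon.KeysHandlerStub {
	return &testscommon.KeysHandlerStub{
		GetRedundancyStepInReasonCalled: func() string {
			return reason
		},
	}
}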
&statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -145,7 +146,7 @@ func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -167,7 +168,7 @@ func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -189,7 +190,7 @@ func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -211,7 +212,7 @@ func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -233,7 +234,7 @@ func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -255,7 +256,7 @@ func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -277,7 +278,7 @@ func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -299,7 +300,7 @@ func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -321,7 +322,7 @@ func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -343,7 +344,7 @@ func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -365,7 +366,7 @@ func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -387,7 +388,7 @@ func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -407,7 +408,7 @@ func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -428,7 +429,7 @@ func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { chainID, currentPid, nil, - &mock.SentSignatureTrackerStub{}, + 
&testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -453,7 +454,7 @@ func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { ) assert.Nil(t, fct) - assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + assert.Equal(t, bls.ErrNilSentSignatureTracker, err) } func TestFactory_NewFactoryShouldWork(t *testing.T) { @@ -478,7 +479,7 @@ func TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) { nil, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) diff --git a/consensus/spos/bls/blsWorker.go b/consensus/spos/bls/blsWorker.go index 8a5eabe6b5a..456d4e8b1d8 100644 --- a/consensus/spos/bls/blsWorker.go +++ b/consensus/spos/bls/blsWorker.go @@ -7,12 +7,13 @@ import ( // peerMaxMessagesPerSec defines how many messages can be propagated by a pid in a round. The value was chosen by // following the next premises: -// 1. a leader can propagate as maximum as 3 messages per round: proposed header block + proposed body + final info; -// 2. due to the fact that a delayed signature of the proposer (from previous round) can be received in the current round -// adds an extra 1 to the total value, reaching value 4; -// 3. Because the leader might be selected in the next round and might have an empty data pool, it can send the newly -// empty proposed block at the very beginning of the next round. One extra message here, yielding to a total of 5. -// 4. If we consider the forks that can appear on the system wee need to add one more to the value. +// 1. a leader can propagate as maximum as 3 messages per round: proposed header block + proposed body + final info; +// 2. due to the fact that a delayed signature of the proposer (from previous round) can be received in the current round +// adds an extra 1 to the total value, reaching value 4; +// 3. Because the leader might be selected in the next round and might have an empty data pool, it can send the newly +// empty proposed block at the very beginning of the next round. One extra message here, yielding to a total of 5. +// 4. If we consider the forks that can appear on the system wee need to add one more to the value. 
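Not part of the diff: the message-budget arithmetic from the comment above as a tiny self-contained sketch; only peerMaxMessagesPerSec exists in the package, the other identifiers are illustrative.

package main

import "fmt"

func main() {
	const (
		leaderMessages   = 3 // proposed header block + proposed body + final info
		delayedSignature = 1 // the proposer's signature from the previous round arriving late
		earlyProposal    = 1 // an empty block proposed at the very beginning of the next round
		forkAllowance    = 1 // one extra message to account for forks
	)
	// 3 + 1 + 1 + 1 = 6, which is the value of peerMaxMessagesPerSec
	fmt.Println(leaderMessages + delayedSignature + earlyProposal + forkAllowance)
}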
+// // Validators only send one signature message in a round, treating the edge case of a delayed message, will need at most // 2 messages per round (which is ok as it is below the set value of 5) const peerMaxMessagesPerSec = uint32(6) @@ -36,7 +37,7 @@ func NewConsensusService() (*worker, error) { return &wrk, nil } -//InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService +// InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService func (wrk *worker) InitReceivedMessages() map[consensus.MessageType][]*consensus.Message { receivedMessages := make(map[consensus.MessageType][]*consensus.Message) receivedMessages[MtBlockBodyAndHeader] = make([]*consensus.Message, 0) @@ -54,47 +55,47 @@ func (wrk *worker) GetMaxMessagesInARoundPerPeer() uint32 { return peerMaxMessagesPerSec } -//GetStringValue gets the name of the messageType +// GetStringValue gets the name of the messageType func (wrk *worker) GetStringValue(messageType consensus.MessageType) string { return getStringValue(messageType) } -//GetSubroundName gets the subround name for the subround id provided +// GetSubroundName gets the subround name for the subround id provided func (wrk *worker) GetSubroundName(subroundId int) string { return getSubroundName(subroundId) } -//IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header +// IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header func (wrk *worker) IsMessageWithBlockBodyAndHeader(msgType consensus.MessageType) bool { return msgType == MtBlockBodyAndHeader } -//IsMessageWithBlockBody returns if the current messageType is about block body +// IsMessageWithBlockBody returns if the current messageType is about block body func (wrk *worker) IsMessageWithBlockBody(msgType consensus.MessageType) bool { return msgType == MtBlockBody } -//IsMessageWithBlockHeader returns if the current messageType is about block header +// IsMessageWithBlockHeader returns if the current messageType is about block header func (wrk *worker) IsMessageWithBlockHeader(msgType consensus.MessageType) bool { return msgType == MtBlockHeader } -//IsMessageWithSignature returns if the current messageType is about signature +// IsMessageWithSignature returns if the current messageType is about signature func (wrk *worker) IsMessageWithSignature(msgType consensus.MessageType) bool { return msgType == MtSignature } -//IsMessageWithFinalInfo returns if the current messageType is about header final info +// IsMessageWithFinalInfo returns if the current messageType is about header final info func (wrk *worker) IsMessageWithFinalInfo(msgType consensus.MessageType) bool { return msgType == MtBlockHeaderFinalInfo } -//IsMessageWithInvalidSigners returns if the current messageType is about invalid signers +// IsMessageWithInvalidSigners returns if the current messageType is about invalid signers func (wrk *worker) IsMessageWithInvalidSigners(msgType consensus.MessageType) bool { return msgType == MtInvalidSigners } -//IsMessageTypeValid returns if the current messageType is valid +// IsMessageTypeValid returns if the current messageType is valid func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { isMessageTypeValid := msgType == MtBlockBodyAndHeader || msgType == MtBlockBody || @@ -106,17 +107,17 @@ func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { return isMessageTypeValid } -//IsSubroundSignature 
returns if the current subround is about signature +// IsSubroundSignature returns if the current subround is about signature func (wrk *worker) IsSubroundSignature(subroundId int) bool { return subroundId == SrSignature } -//IsSubroundStartRound returns if the current subround is about start round +// IsSubroundStartRound returns if the current subround is about start round func (wrk *worker) IsSubroundStartRound(subroundId int) bool { return subroundId == SrStartRound } -//GetMessageRange provides the MessageType range used in checks by the consensus +// GetMessageRange provides the MessageType range used in checks by the consensus func (wrk *worker) GetMessageRange() []consensus.MessageType { var v []consensus.MessageType @@ -127,7 +128,7 @@ func (wrk *worker) GetMessageRange() []consensus.MessageType { return v } -//CanProceed returns if the current messageType can proceed further if previous subrounds finished +// CanProceed returns if the current messageType can proceed further if previous subrounds finished func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType consensus.MessageType) bool { switch msgType { case MtBlockBodyAndHeader: diff --git a/consensus/spos/bls/errors.go b/consensus/spos/bls/errors.go new file mode 100644 index 00000000000..b840f9e2c85 --- /dev/null +++ b/consensus/spos/bls/errors.go @@ -0,0 +1,6 @@ +package bls + +import "errors" + +// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") diff --git a/consensus/spos/bls/subroundBlock.go b/consensus/spos/bls/subroundBlock.go index d032a04eb63..a83969721b8 100644 --- a/consensus/spos/bls/subroundBlock.go +++ b/consensus/spos/bls/subroundBlock.go @@ -63,7 +63,8 @@ func checkNewSubroundBlockParams( // doBlockJob method does the job of the subround Block func (sr *subroundBlock) doBlockJob(ctx context.Context) bool { - if !sr.IsSelfLeaderInCurrentRound() && !sr.IsMultiKeyLeaderInCurrentRound() { // is NOT self leader in this round? + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + if !isSelfLeader && !sr.IsMultiKeyLeaderInCurrentRound() { // is NOT self leader in this round? 
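Not part of the diff: the new leader guard from doBlockJob condensed into a hypothetical helper so the logic reads in isolation; leaderView and shouldProposeBlock do not exist in the codebase, the three methods mirror the ones called above, and the exact semantics of ShouldConsiderSelfKeyInConsensus are not shown in this diff.

package example

// leaderView is a hypothetical narrow view over the methods doBlockJob calls.
type leaderView interface {
	IsSelfLeaderInCurrentRound() bool
	ShouldConsiderSelfKeyInConsensus() bool
	IsMultiKeyLeaderInCurrentRound() bool
}

// shouldProposeBlock mirrors the guard above: the node's own key only counts as leader when it
// should still take part in consensus; otherwise the node may still propose through one of the
// multikey identities it manages.
func shouldProposeBlock(v leaderView) bool {
	isSelfLeader := v.IsSelfLeaderInCurrentRound() && v.ShouldConsiderSelfKeyInConsensus()
	return isSelfLeader || v.IsMultiKeyLeaderInCurrentRound()
}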
return false } diff --git a/consensus/spos/bls/subroundEndRound.go b/consensus/spos/bls/subroundEndRound.go index 723fc0bcbf3..21675715f39 100644 --- a/consensus/spos/bls/subroundEndRound.go +++ b/consensus/spos/bls/subroundEndRound.go @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/process/headerCheck" ) type subroundEndRound struct { @@ -48,7 +49,7 @@ func NewSubroundEndRound( return nil, spos.ErrNilAppStatusHandler } if check.IfNil(sentSignatureTracker) { - return nil, spos.ErrNilSentSignatureTracker + return nil, ErrNilSentSignatureTracker } srEndRound := subroundEndRound{ @@ -120,9 +121,6 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD "AggregateSignature", cnsDta.AggregateSignature, "LeaderSignature", cnsDta.LeaderSignature) - signers := computeSignersPublicKeys(sr.ConsensusGroup(), cnsDta.PubKeysBitmap) - sr.sentSignatureTracker.ReceivedActualSigners(signers) - sr.PeerHonestyHandler().ChangeScore( node, spos.GetConsensusTopicID(sr.ShardCoordinator()), @@ -189,7 +187,7 @@ func (sr *subroundEndRound) receivedInvalidSignersInfo(_ context.Context, cnsDta return false } - if sr.IsSelfLeaderInCurrentRound() { + if sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() { return false } @@ -589,12 +587,23 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfo() { } func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []byte) { + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + if !(isSelfLeader || sr.IsMultiKeyLeaderInCurrentRound()) { + return + } + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("createAndBroadcastInvalidSigners.GetLeader", "error", errGetLeader) + return + } + cnsMsg := consensus.NewConsensusMessage( sr.GetData(), nil, nil, nil, - []byte(sr.SelfPubKey()), + []byte(leader), nil, int(MtInvalidSigners), sr.RoundHandler().Index(), @@ -602,7 +611,7 @@ func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []by nil, nil, nil, - sr.CurrentPid(), + sr.GetAssociatedPid([]byte(leader)), invalidSigners, ) @@ -853,33 +862,9 @@ func (sr *subroundEndRound) doEndRoundConsensusCheck() bool { return false } -// computeSignersPublicKeys will extract from the provided consensus group slice only the strings that matched with the bitmap -func computeSignersPublicKeys(consensusGroup []string, bitmap []byte) []string { - nbBitsBitmap := len(bitmap) * 8 - consensusGroupSize := len(consensusGroup) - size := consensusGroupSize - if consensusGroupSize > nbBitsBitmap { - size = nbBitsBitmap - } - - result := make([]string, 0, len(consensusGroup)) - - for i := 0; i < size; i++ { - indexRequired := (bitmap[i/8] & (1 << uint16(i%8))) > 0 - if !indexRequired { - continue - } - - pubKey := consensusGroup[i] - result = append(result, pubKey) - } - - return result -} - func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error { consensusGroup := sr.ConsensusGroup() - signers := computeSignersPublicKeys(consensusGroup, bitmap) + signers := headerCheck.ComputeSignersPublicKeys(consensusGroup, bitmap) for _, pubKey := range signers { isSigJobDone, err := sr.JobDone(pubKey, SrSignature) if err != nil { diff --git a/consensus/spos/bls/subroundEndRound_test.go 
b/consensus/spos/bls/subroundEndRound_test.go index 456277e23fc..725513b8cb2 100644 --- a/consensus/spos/bls/subroundEndRound_test.go +++ b/consensus/spos/bls/subroundEndRound_test.go @@ -55,7 +55,7 @@ func initSubroundEndRoundWithContainer( bls.ProcessingThresholdPercent, displayStatistics, appStatusHandler, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return srEndRound @@ -97,7 +97,7 @@ func TestNewSubroundEndRound(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srEndRound) @@ -112,7 +112,7 @@ func TestNewSubroundEndRound(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srEndRound) @@ -127,7 +127,7 @@ func TestNewSubroundEndRound(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, nil, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srEndRound) @@ -146,7 +146,7 @@ func TestNewSubroundEndRound(t *testing.T) { ) assert.Nil(t, srEndRound) - assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + assert.Equal(t, bls.ErrNilSentSignatureTracker, err) }) } @@ -179,7 +179,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing. bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -215,7 +215,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *test bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -252,7 +252,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -288,7 +288,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -324,7 +324,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testin bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -360,7 +360,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -396,7 +396,7 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.False(t, check.IfNil(srEndRound)) @@ 
-902,16 +902,8 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldWork(t *testing.T) { PubKey: []byte("A"), } - sentTrackerInterface := sr.GetSentSignatureTracker() - sentTracker := sentTrackerInterface.(*mock.SentSignatureTrackerStub) - receivedActualSignersCalled := false - sentTracker.ReceivedActualSignersCalled = func(signersPks []string) { - receivedActualSignersCalled = true - } - res := sr.ReceivedBlockHeaderFinalInfo(&cnsData) assert.True(t, res) - assert.True(t, receivedActualSignersCalled) } func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldReturnFalseWhenFinalInfoIsNotValid(t *testing.T) { @@ -1322,7 +1314,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { assert.False(t, res) }) - t.Run("received message for self leader", func(t *testing.T) { + t.Run("received message from self leader should return false", func(t *testing.T) { t.Parallel() container := mock.InitConsensusCore() @@ -1339,6 +1331,53 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { assert.False(t, res) }) + t.Run("received message from self multikey leader should return false", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return string(pkBytes) == "A" + }, + } + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + srEndRound, _ := bls.NewSubroundEndRound( + sr, + extend, + bls.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + srEndRound.SetSelfPubKey("A") + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + + res := srEndRound.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + t.Run("received hash does not match the hash from current consensus state", func(t *testing.T) { t.Parallel() @@ -1556,29 +1595,60 @@ func TestVerifyInvalidSigners(t *testing.T) { func TestSubroundEndRound_CreateAndBroadcastInvalidSigners(t *testing.T) { t.Parallel() - wg := &sync.WaitGroup{} - wg.Add(1) + t.Run("redundancy node should not send while main is active", func(t *testing.T) { + t.Parallel() - expectedInvalidSigners := []byte("invalid signers") + expectedInvalidSigners := []byte("invalid signers") - wasCalled := false - container := mock.InitConsensusCore() - messenger := &mock.BroadcastMessengerMock{ - BroadcastConsensusMessageCalled: func(message *consensus.Message) error { - wg.Done() - assert.Equal(t, expectedInvalidSigners, message.InvalidSigners) - wasCalled = true - return nil - }, - } - container.SetBroadcastMessenger(messenger) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + container := mock.InitConsensusCore() + nodeRedundancy := &mock.NodeRedundancyHandlerStub{ + IsRedundancyNodeCalled: func() bool { + return true + }, + IsMainMachineActiveCalled: func() bool { + return true + }, + } + container.SetNodeRedundancyHandler(nodeRedundancy) + messenger := &mock.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + assert.Fail(t, "should have not 
been called") + return nil + }, + } + container.SetBroadcastMessenger(messenger) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + wg := &sync.WaitGroup{} + wg.Add(1) - wg.Wait() + expectedInvalidSigners := []byte("invalid signers") - require.True(t, wasCalled) + wasCalled := false + container := mock.InitConsensusCore() + messenger := &mock.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + assert.Equal(t, expectedInvalidSigners, message.InvalidSigners) + wasCalled = true + wg.Done() + return nil + }, + } + container.SetBroadcastMessenger(messenger) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + + wg.Wait() + + require.True(t, wasCalled) + }) } func TestGetFullMessagesForInvalidSigners(t *testing.T) { @@ -1665,7 +1735,7 @@ func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) t.Run("no managed keys from consensus group", func(t *testing.T) { diff --git a/consensus/spos/bls/subroundSignature.go b/consensus/spos/bls/subroundSignature.go index 84892d660fe..ac06cc72fdd 100644 --- a/consensus/spos/bls/subroundSignature.go +++ b/consensus/spos/bls/subroundSignature.go @@ -39,7 +39,7 @@ func NewSubroundSignature( return nil, spos.ErrNilAppStatusHandler } if check.IfNil(sentSignatureTracker) { - return nil, spos.ErrNilSentSignatureTracker + return nil, ErrNilSentSignatureTracker } srSignature := subroundSignature{ diff --git a/consensus/spos/bls/subroundSignature_test.go b/consensus/spos/bls/subroundSignature_test.go index d12e00b52c0..9ee8a03ba19 100644 --- a/consensus/spos/bls/subroundSignature_test.go +++ b/consensus/spos/bls/subroundSignature_test.go @@ -41,7 +41,7 @@ func initSubroundSignatureWithContainer(container *mock.ConsensusCoreMock) bls.S sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return srSignature @@ -82,7 +82,7 @@ func TestNewSubroundSignature(t *testing.T) { nil, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srSignature) @@ -95,7 +95,7 @@ func TestNewSubroundSignature(t *testing.T) { sr, nil, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srSignature) @@ -108,7 +108,7 @@ func TestNewSubroundSignature(t *testing.T) { sr, extend, nil, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srSignature) @@ -125,7 +125,7 @@ func TestNewSubroundSignature(t *testing.T) { ) assert.Nil(t, srSignature) - assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + assert.Equal(t, bls.ErrNilSentSignatureTracker, err) }) } @@ -157,7 +157,7 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, 
check.IfNil(srSignature)) @@ -191,7 +191,7 @@ func TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -225,7 +225,7 @@ func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -260,7 +260,7 @@ func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *test sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -294,7 +294,7 @@ func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -328,7 +328,7 @@ func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.False(t, check.IfNil(srSignature)) @@ -411,7 +411,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{ + &testscommon.SentSignatureTrackerStub{ SignatureSentCalled: func(pkBytes []byte) { signatureSentForPks[string(pkBytes)] = struct{}{} }, diff --git a/consensus/spos/bls/subroundStartRound.go b/consensus/spos/bls/subroundStartRound.go index c622779fdac..571270dd774 100644 --- a/consensus/spos/bls/subroundStartRound.go +++ b/consensus/spos/bls/subroundStartRound.go @@ -54,7 +54,7 @@ func NewSubroundStartRound( return nil, fmt.Errorf("%w for resetConsensusMessages function", spos.ErrNilFunctionHandler) } if check.IfNil(sentSignatureTracker) { - return nil, spos.ErrNilSentSignatureTracker + return nil, ErrNilSentSignatureTracker } srStartRound := subroundStartRound{ @@ -155,6 +155,8 @@ func (sr *subroundStartRound) initCurrentRound() bool { sr.ConsensusGroup(), sr.RoundHandler().Index(), ) + // we should not return here, the multikey redundancy system relies on it + // the NodeRedundancyHandler "thinks" it is in redundancy mode even if we use the multikey redundancy system } leader, err := sr.GetLeader() diff --git a/consensus/spos/bls/subroundStartRound_test.go b/consensus/spos/bls/subroundStartRound_test.go index 51c96117dbc..2f5c21d2659 100644 --- a/consensus/spos/bls/subroundStartRound_test.go +++ b/consensus/spos/bls/subroundStartRound_test.go @@ -23,7 +23,7 @@ func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (bls.SubroundStart bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return startRound, err @@ -36,7 +36,7 @@ func defaultWithoutErrorSubroundStartRoundFromSubround(sr *spos.Subround) bls.Su bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return startRound @@ -75,7 +75,7 @@ func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) bl bls.ProcessingThresholdPercent, executeStoredMessages, 
resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return srStartRound @@ -117,7 +117,7 @@ func TestNewSubroundStartRound(t *testing.T) { bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srStartRound) @@ -132,7 +132,7 @@ func TestNewSubroundStartRound(t *testing.T) { bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srStartRound) @@ -148,7 +148,7 @@ func TestNewSubroundStartRound(t *testing.T) { bls.ProcessingThresholdPercent, nil, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srStartRound) @@ -164,7 +164,7 @@ func TestNewSubroundStartRound(t *testing.T) { bls.ProcessingThresholdPercent, executeStoredMessages, nil, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srStartRound) @@ -184,7 +184,7 @@ func TestNewSubroundStartRound(t *testing.T) { ) assert.Nil(t, srStartRound) - assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + assert.Equal(t, bls.ErrNilSentSignatureTracker, err) }) } @@ -366,7 +366,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenInitCu sr := *initSubroundStartRoundWithContainer(container) sentTrackerInterface := sr.GetSentSignatureTracker() - sentTracker := sentTrackerInterface.(*mock.SentSignatureTrackerStub) + sentTracker := sentTrackerInterface.(*testscommon.SentSignatureTrackerStub) startRoundCalled := false sentTracker.StartRoundCalled = func() { startRoundCalled = true @@ -561,7 +561,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasCalled) @@ -615,7 +615,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasCalled) @@ -668,7 +668,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasCalled) @@ -732,7 +732,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasMetricConsensusStateCalled) @@ -800,7 +800,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasMetricConsensusStateCalled) diff --git a/consensus/spos/consensusCore.go b/consensus/spos/consensusCore.go index 1edfb09b5fc..2cf7ca369d6 100644 --- a/consensus/spos/consensusCore.go +++ b/consensus/spos/consensusCore.go @@ -14,7 +14,7 @@ import ( ) // ConsensusCore implements 
ConsensusCoreHandler and provides access to common functionality -// for the rest of the consensus structures +// for the rest of the consensus structures type ConsensusCore struct { blockChain data.ChainHandler blockProcessor process.BlockProcessor @@ -148,7 +148,7 @@ func (cc *ConsensusCore) MultiSignerContainer() cryptoCommon.MultiSignerContaine return cc.multiSignerContainer } -//RoundHandler gets the RoundHandler stored in the ConsensusCore +// RoundHandler gets the RoundHandler stored in the ConsensusCore func (cc *ConsensusCore) RoundHandler() consensus.RoundHandler { return cc.roundHandler } @@ -158,7 +158,7 @@ func (cc *ConsensusCore) ShardCoordinator() sharding.Coordinator { return cc.shardCoordinator } -//SyncTimer gets the SyncTimer stored in the ConsensusCore +// SyncTimer gets the SyncTimer stored in the ConsensusCore func (cc *ConsensusCore) SyncTimer() ntp.SyncTimer { return cc.syncTimer } diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index c3f48919d83..564b3def852 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -380,6 +380,11 @@ func (cns *ConsensusState) IsMultiKeyJobDone(currentSubroundId int) bool { return true } +// GetMultikeyRedundancyStepInReason returns the reason if the current node stepped in as a multikey redundancy node +func (cns *ConsensusState) GetMultikeyRedundancyStepInReason() string { + return cns.keysHandler.GetRedundancyStepInReason() +} + // ResetRoundsWithoutReceivedMessages will reset the rounds received without a message for a specified public key by // providing also the peer ID from the received message func (cns *ConsensusState) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { diff --git a/consensus/spos/consensusState_test.go b/consensus/spos/consensusState_test.go index 74c8426f197..554c9c0c755 100644 --- a/consensus/spos/consensusState_test.go +++ b/consensus/spos/consensusState_test.go @@ -5,6 +5,7 @@ import ( "errors" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" @@ -582,3 +583,37 @@ func TestConsensusState_IsMultiKeyJobDone(t *testing.T) { assert.True(t, cns.IsMultiKeyJobDone(0)) }) } + +func TestConsensusState_GetMultikeyRedundancyStepInReason(t *testing.T) { + t.Parallel() + + expectedString := "expected string" + keysHandler := &testscommon.KeysHandlerStub{ + GetRedundancyStepInReasonCalled: func() string { + return expectedString + }, + } + cns := internalInitConsensusStateWithKeysHandler(keysHandler) + + assert.Equal(t, expectedString, cns.GetMultikeyRedundancyStepInReason()) +} + +func TestConsensusState_ResetRoundsWithoutReceivedMessages(t *testing.T) { + t.Parallel() + + resetRoundsWithoutReceivedMessagesCalled := false + testPkBytes := []byte("pk bytes") + testPid := core.PeerID("pid") + + keysHandler := &testscommon.KeysHandlerStub{ + ResetRoundsWithoutReceivedMessagesCalled: func(pkBytes []byte, pid core.PeerID) { + resetRoundsWithoutReceivedMessagesCalled = true + assert.Equal(t, testPkBytes, pkBytes) + assert.Equal(t, testPid, pid) + }, + } + cns := internalInitConsensusStateWithKeysHandler(keysHandler) + + cns.ResetRoundsWithoutReceivedMessages(testPkBytes, testPid) + assert.True(t, resetRoundsWithoutReceivedMessagesCalled) +} diff --git a/consensus/spos/errors.go b/consensus/spos/errors.go index 
c8b5cede565..3aeac029da3 100644 --- a/consensus/spos/errors.go +++ b/consensus/spos/errors.go @@ -238,8 +238,8 @@ var ErrNilSigningHandler = errors.New("nil signing handler") // ErrNilKeysHandler signals that a nil keys handler was provided var ErrNilKeysHandler = errors.New("nil keys handler") -// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker -var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") - // ErrNilFunctionHandler signals that a nil function handler was provided var ErrNilFunctionHandler = errors.New("nil function handler") + +// ErrWrongHashForHeader signals that the hash of the header is not the expected one +var ErrWrongHashForHeader = errors.New("wrong hash for header") diff --git a/consensus/spos/export_test.go b/consensus/spos/export_test.go index 3a02e7b27fb..39d19de6e30 100644 --- a/consensus/spos/export_test.go +++ b/consensus/spos/export_test.go @@ -10,6 +10,9 @@ import ( "github.com/multiversx/mx-chain-go/process" ) +// RedundancySingleKeySteppedIn exposes the redundancySingleKeySteppedIn constant +const RedundancySingleKeySteppedIn = redundancySingleKeySteppedIn + type RoundConsensus struct { *roundConsensus } @@ -173,6 +176,16 @@ func (wrk *Worker) CheckSelfState(cnsDta *consensus.Message) error { return wrk.checkSelfState(cnsDta) } +// SetRedundancyHandler - +func (wrk *Worker) SetRedundancyHandler(redundancyHandler consensus.NodeRedundancyHandler) { + wrk.nodeRedundancyHandler = redundancyHandler +} + +// SetKeysHandler - +func (wrk *Worker) SetKeysHandler(keysHandler consensus.KeysHandler) { + wrk.consensusState.keysHandler = keysHandler +} + // EligibleList - func (rcns *RoundConsensus) EligibleList() map[string]struct{} { return rcns.eligibleNodes diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 235c139d2fb..0ca771d30e5 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -175,6 +175,5 @@ type PeerBlackListCacher interface { type SentSignaturesTracker interface { StartRound() SignatureSent(pkBytes []byte) - ReceivedActualSigners(signersPks []string) IsInterfaceNil() bool } diff --git a/consensus/spos/sposFactory/sposFactory_test.go b/consensus/spos/sposFactory/sposFactory_test.go index 090f5b19f0a..4a672a3343f 100644 --- a/consensus/spos/sposFactory/sposFactory_test.go +++ b/consensus/spos/sposFactory/sposFactory_test.go @@ -52,7 +52,7 @@ func TestGetSubroundsFactory_BlsNilConsensusCoreShouldErr(t *testing.T) { consensusType, statusHandler, indexer, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, chainID, currentPid, ) @@ -76,7 +76,7 @@ func TestGetSubroundsFactory_BlsNilStatusHandlerShouldErr(t *testing.T) { consensusType, nil, indexer, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, chainID, currentPid, ) @@ -101,7 +101,7 @@ func TestGetSubroundsFactory_BlsShouldWork(t *testing.T) { consensusType, statusHandler, indexer, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, chainID, currentPid, ) diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index 8fdcca4686f..f11e40d3089 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -1,6 +1,7 @@ package spos import ( + "bytes" "context" "encoding/hex" "errors" @@ -17,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" crypto 
"github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" errorsErd "github.com/multiversx/mx-chain-go/errors" @@ -31,6 +33,7 @@ var _ closing.Closer = (*Worker)(nil) // sleepTime defines the time in milliseconds between each iteration made in checkChannels method const sleepTime = 5 * time.Millisecond +const redundancySingleKeySteppedIn = "single-key node stepped in" // Worker defines the data needed by spos to communicate between nodes which are in the validators group type Worker struct { @@ -484,6 +487,11 @@ func (wrk *Worker) doJobOnMessageWithHeader(cnsMsg *consensus.Message) error { "nbTxs", header.GetTxCount(), "val stats root hash", valStatsRootHash) + if !wrk.verifyHeaderHash(headerHash, cnsMsg.Header) { + return fmt.Errorf("%w : received header from consensus with wrong hash", + ErrWrongHashForHeader) + } + err = wrk.headerIntegrityVerifier.Verify(header) if err != nil { return fmt.Errorf("%w : verify header integrity from consensus topic failed", err) @@ -508,6 +516,11 @@ func (wrk *Worker) doJobOnMessageWithHeader(cnsMsg *consensus.Message) error { return nil } +func (wrk *Worker) verifyHeaderHash(hash []byte, marshalledHeader []byte) bool { + computedHash := wrk.hasher.Compute(string(marshalledHeader)) + return bytes.Equal(hash, computedHash) +} + func (wrk *Worker) doJobOnMessageWithSignature(cnsMsg *consensus.Message, p2pMsg p2p.MessageP2P) { wrk.mutDisplayHashConsensusMessage.Lock() defer wrk.mutDisplayHashConsensusMessage.Unlock() @@ -545,7 +558,20 @@ func (wrk *Worker) processReceivedHeaderMetric(cnsDta *consensus.Message) { } percent := sinceRoundStart * 100 / wrk.roundHandler.TimeDuration() wrk.appStatusHandler.SetUInt64Value(common.MetricReceivedProposedBlock, uint64(percent)) - wrk.appStatusHandler.SetStringValue(common.MetricRedundancyIsMainActive, strconv.FormatBool(wrk.nodeRedundancyHandler.IsMainMachineActive())) + + isMainMachineActive, redundancyReason := wrk.computeRedundancyMetrics() + wrk.appStatusHandler.SetStringValue(common.MetricRedundancyIsMainActive, strconv.FormatBool(isMainMachineActive)) + wrk.appStatusHandler.SetStringValue(common.MetricRedundancyStepInReason, redundancyReason) +} + +func (wrk *Worker) computeRedundancyMetrics() (bool, string) { + if !wrk.nodeRedundancyHandler.IsMainMachineActive() { + return false, redundancySingleKeySteppedIn + } + + reason := wrk.consensusState.GetMultikeyRedundancyStepInReason() + + return len(reason) == 0, reason } func (wrk *Worker) checkSelfState(cnsDta *consensus.Message) error { diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 37cc36f33c1..b179fdf0db8 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "math/big" + "strconv" "sync/atomic" "testing" "time" @@ -15,6 +16,9 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/mock" @@ -26,8 +30,6 @@ import ( 
"github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const roundTimeDuration = 100 * time.Millisecond @@ -628,13 +630,21 @@ func TestWorker_ProcessReceivedMessageComputeReceivedProposedBlockMetric(t *test delay := time.Millisecond * 430 roundStartTimeStamp := time.Now() - receivedValue := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric(roundStartTimeStamp, delay, roundDuration) + receivedValue, redundancyReason, redundancyStatus := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t, + roundStartTimeStamp, + delay, + roundDuration, + &mock.NodeRedundancyHandlerStub{}, + &testscommon.KeysHandlerStub{}) minimumExpectedValue := uint64(delay * 100 / roundDuration) assert.True(t, receivedValue >= minimumExpectedValue, fmt.Sprintf("minimum expected was %d, got %d", minimumExpectedValue, receivedValue), ) + assert.Empty(t, redundancyReason) + assert.True(t, redundancyStatus) }) t.Run("time.Since returns negative value", func(t *testing.T) { // test the edgecase when the returned NTP time stored in the round handler is @@ -645,23 +655,101 @@ func TestWorker_ProcessReceivedMessageComputeReceivedProposedBlockMetric(t *test delay := time.Millisecond * 430 roundStartTimeStamp := time.Now().Add(time.Minute) - receivedValue := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric(roundStartTimeStamp, delay, roundDuration) + receivedValue, redundancyReason, redundancyStatus := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t, + roundStartTimeStamp, + delay, + roundDuration, + &mock.NodeRedundancyHandlerStub{}, + &testscommon.KeysHandlerStub{}) assert.Zero(t, receivedValue) + assert.Empty(t, redundancyReason) + assert.True(t, redundancyStatus) + }) + t.Run("normal operation as a single-key redundancy node", func(t *testing.T) { + t.Parallel() + + roundDuration := time.Millisecond * 1000 + delay := time.Millisecond * 430 + roundStartTimeStamp := time.Now() + + receivedValue, redundancyReason, redundancyStatus := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t, + roundStartTimeStamp, + delay, + roundDuration, + &mock.NodeRedundancyHandlerStub{ + IsMainMachineActiveCalled: func() bool { + return false + }, + }, + &testscommon.KeysHandlerStub{}) + + minimumExpectedValue := uint64(delay * 100 / roundDuration) + assert.True(t, + receivedValue >= minimumExpectedValue, + fmt.Sprintf("minimum expected was %d, got %d", minimumExpectedValue, receivedValue), + ) + assert.Equal(t, spos.RedundancySingleKeySteppedIn, redundancyReason) + assert.False(t, redundancyStatus) + }) + t.Run("normal operation as a multikey-key redundancy node", func(t *testing.T) { + t.Parallel() + + roundDuration := time.Millisecond * 1000 + delay := time.Millisecond * 430 + roundStartTimeStamp := time.Now() + + multikeyReason := "multikey step in reason" + receivedValue, redundancyReason, redundancyStatus := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t, + roundStartTimeStamp, + delay, + roundDuration, + &mock.NodeRedundancyHandlerStub{}, + &testscommon.KeysHandlerStub{ + GetRedundancyStepInReasonCalled: func() string { + return multikeyReason + }, + }) + + minimumExpectedValue := uint64(delay * 100 / roundDuration) 
+ assert.True(t, + receivedValue >= minimumExpectedValue, + fmt.Sprintf("minimum expected was %d, got %d", minimumExpectedValue, receivedValue), + ) + assert.Equal(t, multikeyReason, redundancyReason) + assert.False(t, redundancyStatus) }) } func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t *testing.T, roundStartTimeStamp time.Time, delay time.Duration, roundDuration time.Duration, -) uint64 { + redundancyHandler consensus.NodeRedundancyHandler, + keysHandler consensus.KeysHandler, +) (uint64, string, bool) { marshaller := mock.MarshalizerMock{} receivedValue := uint64(0) + redundancyReason := "" + redundancyStatus := false wrk := *initWorker(&statusHandlerMock.AppStatusHandlerStub{ SetUInt64ValueHandler: func(key string, value uint64) { receivedValue = value }, + SetStringValueHandler: func(key string, value string) { + if key == common.MetricRedundancyIsMainActive { + var err error + redundancyStatus, err = strconv.ParseBool(value) + assert.Nil(t, err) + } + if key == common.MetricRedundancyStepInReason { + redundancyReason = value + } + }, }) wrk.SetBlockProcessor(&testscommon.BlockProcessorStub{ DecodeBlockHeaderCalled: func(dta []byte) data.HeaderHandler { @@ -686,6 +774,8 @@ func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( return roundStartTimeStamp }, }) + wrk.SetRedundancyHandler(redundancyHandler) + wrk.SetKeysHandler(keysHandler) hdr := &block.Header{ ChainID: chainID, PrevHash: []byte("prev hash"), @@ -725,7 +815,7 @@ func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( } _ = wrk.ProcessReceivedMessage(msg, "", &p2pmocks.MessengerStub{}) - return receivedValue + return receivedValue, redundancyReason, redundancyStatus } func TestWorker_ProcessReceivedMessageInconsistentChainIDInConsensusMessageShouldErr(t *testing.T) { @@ -1163,6 +1253,64 @@ func TestWorker_ProcessReceivedMessageWithABadOriginatorShouldErr(t *testing.T) assert.True(t, errors.Is(err, spos.ErrOriginatorMismatch)) } +func TestWorker_ProcessReceivedMessageWithHeaderAndWrongHash(t *testing.T) { + t.Parallel() + + workerArgs := createDefaultWorkerArgs(&statusHandlerMock.AppStatusHandlerStub{}) + wrk, _ := spos.NewWorker(workerArgs) + + wrk.SetBlockProcessor( + &testscommon.BlockProcessorStub{ + DecodeBlockHeaderCalled: func(dta []byte) data.HeaderHandler { + return &testscommon.HeaderHandlerStub{ + CheckChainIDCalled: func(reference []byte) error { + return nil + }, + GetPrevHashCalled: func() []byte { + return make([]byte, 0) + }, + } + }, + RevertCurrentBlockCalled: func() { + }, + DecodeBlockBodyCalled: func(dta []byte) data.BodyHandler { + return nil + }, + }, + ) + + hdr := &block.Header{ChainID: chainID} + hdrHash := make([]byte, 32) // wrong hash + hdrStr, _ := mock.MarshalizerMock{}.Marshal(hdr) + cnsMsg := consensus.NewConsensusMessage( + hdrHash, + nil, + nil, + hdrStr, + []byte(wrk.ConsensusState().ConsensusGroup()[0]), + signature, + int(bls.MtBlockHeader), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + buff, _ := wrk.Marshalizer().Marshal(cnsMsg) + msg := &p2pmocks.P2PMessageMock{ + DataField: buff, + PeerField: currentPid, + SignatureField: []byte("signature"), + } + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) + time.Sleep(time.Second) + + assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockHeader])) + assert.ErrorIs(t, err, spos.ErrWrongHashForHeader) +} + func TestWorker_ProcessReceivedMessageOkValsShouldWork(t *testing.T) { t.Parallel() diff --git 
a/dataRetriever/blockchain/blockchain.go b/dataRetriever/blockchain/blockchain.go index bf18ad64402..f8d011e5a08 100644 --- a/dataRetriever/blockchain/blockchain.go +++ b/dataRetriever/blockchain/blockchain.go @@ -69,6 +69,7 @@ func (bc *blockChain) SetCurrentBlockHeaderAndRootHash(header data.HeaderHandler bc.appStatusHandler.SetUInt64Value(common.MetricNonce, h.GetNonce()) bc.appStatusHandler.SetUInt64Value(common.MetricSynchronizedRound, h.GetRound()) + bc.appStatusHandler.SetUInt64Value(common.MetricBlockTimestamp, h.GetTimeStamp()) bc.mut.Lock() bc.currentBlockHeader = h.ShallowClone() diff --git a/dataRetriever/blockchain/metachain.go b/dataRetriever/blockchain/metachain.go index 179b1b84b0a..0ef4b1247c2 100644 --- a/dataRetriever/blockchain/metachain.go +++ b/dataRetriever/blockchain/metachain.go @@ -71,6 +71,7 @@ func (mc *metaChain) SetCurrentBlockHeaderAndRootHash(header data.HeaderHandler, mc.appStatusHandler.SetUInt64Value(common.MetricNonce, currHead.Nonce) mc.appStatusHandler.SetUInt64Value(common.MetricSynchronizedRound, currHead.Round) + mc.appStatusHandler.SetUInt64Value(common.MetricBlockTimestamp, currHead.GetTimeStamp()) mc.mut.Lock() mc.currentBlockHeader = currHead.ShallowClone() diff --git a/dataRetriever/chainStorer.go b/dataRetriever/chainStorer.go index 88541d10077..933d4b97a51 100644 --- a/dataRetriever/chainStorer.go +++ b/dataRetriever/chainStorer.go @@ -10,7 +10,7 @@ import ( var _ StorageService = (*ChainStorer)(nil) // ChainStorer is a StorageService implementation that can hold multiple storages -// grouped by storage unit type +// grouped by storage unit type type ChainStorer struct { lock sync.RWMutex chain map[UnitType]storage.Storer diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go index 0033d14f686..6e1415ddfd8 100644 --- a/dataRetriever/factory/dataPoolFactory.go +++ b/dataRetriever/factory/dataPoolFactory.go @@ -2,7 +2,6 @@ package factory import ( "fmt" - "os" "time" "github.com/multiversx/mx-chain-core-go/core" @@ -179,22 +178,12 @@ func createTrieSyncDB(args ArgsDataPool) (storage.Persister, error) { shardId := core.GetShardIDString(args.ShardCoordinator.SelfId()) path := args.PathManager.PathForStatic(shardId, mainConfig.TrieSyncStorage.DB.FilePath) - dbConfigHandler := factory.NewDBConfigHandler(mainConfig.TrieSyncStorage.DB) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(mainConfig.TrieSyncStorage.DB) if err != nil { return nil, err } - if mainConfig.TrieSyncStorage.DB.UseTmpAsFilePath { - filePath, errTempDir := os.MkdirTemp("", "trieSyncStorage") - if errTempDir != nil { - return nil, errTempDir - } - - path = filePath - } - - db, err := storageunit.NewDB(persisterFactory, path) + db, err := persisterFactory.CreateWithRetries(path) if err != nil { return nil, fmt.Errorf("%w while creating the db for the trie nodes", err) } diff --git a/dataRetriever/factory/resolverscontainer/args.go b/dataRetriever/factory/resolverscontainer/args.go index 1446af01b97..d0001014a4d 100644 --- a/dataRetriever/factory/resolverscontainer/args.go +++ b/dataRetriever/factory/resolverscontainer/args.go @@ -11,21 +11,22 @@ import ( // FactoryArgs will hold the arguments for ResolversContainerFactory for both shard and meta type FactoryArgs struct { - NumConcurrentResolvingJobs int32 - ShardCoordinator sharding.Coordinator - MainMessenger p2p.Messenger - FullArchiveMessenger p2p.Messenger - Store dataRetriever.StorageService - 
Marshalizer marshal.Marshalizer - DataPools dataRetriever.PoolsHolder - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - DataPacker dataRetriever.DataPacker - TriesContainer common.TriesHolder - InputAntifloodHandler dataRetriever.P2PAntifloodHandler - OutputAntifloodHandler dataRetriever.P2PAntifloodHandler - MainPreferredPeersHolder p2p.PreferredPeersHolderHandler - FullArchivePreferredPeersHolder p2p.PreferredPeersHolderHandler - SizeCheckDelta uint32 - IsFullHistoryNode bool - PayloadValidator dataRetriever.PeerAuthenticationPayloadValidator + NumConcurrentResolvingJobs int32 + NumConcurrentResolvingTrieNodesJobs int32 + ShardCoordinator sharding.Coordinator + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + Store dataRetriever.StorageService + Marshalizer marshal.Marshalizer + DataPools dataRetriever.PoolsHolder + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + DataPacker dataRetriever.DataPacker + TriesContainer common.TriesHolder + InputAntifloodHandler dataRetriever.P2PAntifloodHandler + OutputAntifloodHandler dataRetriever.P2PAntifloodHandler + MainPreferredPeersHolder p2p.PreferredPeersHolderHandler + FullArchivePreferredPeersHolder p2p.PreferredPeersHolderHandler + SizeCheckDelta uint32 + IsFullHistoryNode bool + PayloadValidator dataRetriever.PeerAuthenticationPayloadValidator } diff --git a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go index c1fc1e3a16b..3d0eff8eaa9 100644 --- a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go @@ -36,6 +36,7 @@ type baseResolversContainerFactory struct { inputAntifloodHandler dataRetriever.P2PAntifloodHandler outputAntifloodHandler dataRetriever.P2PAntifloodHandler throttler dataRetriever.ResolverThrottler + trieNodesThrottler dataRetriever.ResolverThrottler intraShardTopic string isFullHistoryNode bool mainPreferredPeersHolder dataRetriever.PreferredPeersHolderHandler @@ -78,7 +79,10 @@ func (brcf *baseResolversContainerFactory) checkParams() error { return fmt.Errorf("%w for output", dataRetriever.ErrNilAntifloodHandler) } if check.IfNil(brcf.throttler) { - return dataRetriever.ErrNilThrottler + return fmt.Errorf("%w for the main throttler", dataRetriever.ErrNilThrottler) + } + if check.IfNil(brcf.trieNodesThrottler) { + return fmt.Errorf("%w for the trie nodes throttler", dataRetriever.ErrNilThrottler) } if check.IfNil(brcf.mainPreferredPeersHolder) { return fmt.Errorf("%w for main network", dataRetriever.ErrNilPreferredPeersHolder) @@ -351,7 +355,7 @@ func (brcf *baseResolversContainerFactory) createTrieNodesResolver( SenderResolver: resolverSender, Marshaller: brcf.marshalizer, AntifloodHandler: brcf.inputAntifloodHandler, - Throttler: brcf.throttler, + Throttler: brcf.trieNodesThrottler, }, TrieDataGetter: trie, } diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index 426a978ae20..b72f8c3154a 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -27,7 +27,12 @@ func NewMetaResolversContainerFactory( args.Marshalizer = marshal.NewSizeCheckUnmarshalizer(args.Marshalizer, args.SizeCheckDelta) } - thr, err := 
throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) + mainThrottler, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) + if err != nil { + return nil, err + } + + trieNodesThrottler, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingTrieNodesJobs) if err != nil { return nil, err } @@ -46,7 +51,8 @@ func NewMetaResolversContainerFactory( triesContainer: args.TriesContainer, inputAntifloodHandler: args.InputAntifloodHandler, outputAntifloodHandler: args.OutputAntifloodHandler, - throttler: thr, + throttler: mainThrottler, + trieNodesThrottler: trieNodesThrottler, isFullHistoryNode: args.IsFullHistoryNode, mainPreferredPeersHolder: args.MainPreferredPeersHolder, fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go index c6659693d79..755672384cd 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go @@ -94,8 +94,15 @@ func TestNewMetaResolversContainerFactory_NewNumGoRoutinesThrottlerFailsShouldEr args := getArgumentsMeta() args.NumConcurrentResolvingJobs = 0 + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + assert.Nil(t, rcf) + assert.Equal(t, core.ErrNotPositiveValue, err) + + args.NumConcurrentResolvingJobs = 10 + args.NumConcurrentResolvingTrieNodesJobs = 0 + rcf, err = resolverscontainer.NewMetaResolversContainerFactory(args) assert.Nil(t, rcf) assert.Equal(t, core.ErrNotPositiveValue, err) } @@ -357,21 +364,22 @@ func TestMetaResolversContainerFactory_IsInterfaceNil(t *testing.T) { func getArgumentsMeta() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - MainMessenger: createStubMessengerForMeta("", ""), - FullArchiveMessenger: createStubMessengerForMeta("", ""), - Store: createStoreForMeta(), - Marshalizer: &mock.MarshalizerMock{}, - DataPools: createDataPoolsForMeta(), - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - DataPacker: &mock.DataPackerStub{}, - TriesContainer: createTriesHolderForMeta(), - SizeCheckDelta: 0, - InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - NumConcurrentResolvingJobs: 10, - MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + MainMessenger: createStubMessengerForMeta("", ""), + FullArchiveMessenger: createStubMessengerForMeta("", ""), + Store: createStoreForMeta(), + Marshalizer: &mock.MarshalizerMock{}, + DataPools: createDataPoolsForMeta(), + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + DataPacker: &mock.DataPackerStub{}, + TriesContainer: createTriesHolderForMeta(), + SizeCheckDelta: 0, + InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, } } diff --git 
a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go index 28582f03bc5..f24beaa4331 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go @@ -25,7 +25,12 @@ func NewShardResolversContainerFactory( args.Marshalizer = marshal.NewSizeCheckUnmarshalizer(args.Marshalizer, args.SizeCheckDelta) } - thr, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) + mainThrottler, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) + if err != nil { + return nil, err + } + + trieNodesThrottler, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingTrieNodesJobs) if err != nil { return nil, err } @@ -44,7 +49,8 @@ func NewShardResolversContainerFactory( triesContainer: args.TriesContainer, inputAntifloodHandler: args.InputAntifloodHandler, outputAntifloodHandler: args.OutputAntifloodHandler, - throttler: thr, + throttler: mainThrottler, + trieNodesThrottler: trieNodesThrottler, isFullHistoryNode: args.IsFullHistoryNode, mainPreferredPeersHolder: args.MainPreferredPeersHolder, fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go index 4d6ca351195..ca97015f3ae 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go @@ -100,8 +100,15 @@ func TestNewShardResolversContainerFactory_NewNumGoRoutinesThrottlerFailsShouldE args := getArgumentsShard() args.NumConcurrentResolvingJobs = 0 + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + assert.Nil(t, rcf) + assert.Equal(t, core.ErrNotPositiveValue, err) + + args.NumConcurrentResolvingJobs = 10 + args.NumConcurrentResolvingTrieNodesJobs = 0 + rcf, err = resolverscontainer.NewShardResolversContainerFactory(args) assert.Nil(t, rcf) assert.Equal(t, core.ErrNotPositiveValue, err) } @@ -465,21 +472,22 @@ func TestShardResolversContainerFactory_IsInterfaceNil(t *testing.T) { func getArgumentsShard() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - MainMessenger: createMessengerStubForShard("", ""), - FullArchiveMessenger: createMessengerStubForShard("", ""), - Store: createStoreForShard(), - Marshalizer: &mock.MarshalizerMock{}, - DataPools: createDataPoolsForShard(), - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - DataPacker: &mock.DataPackerStub{}, - TriesContainer: createTriesHolderForShard(), - SizeCheckDelta: 0, - InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - NumConcurrentResolvingJobs: 10, - MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + MainMessenger: createMessengerStubForShard("", ""), + FullArchiveMessenger: createMessengerStubForShard("", ""), + Store: createStoreForShard(), + Marshalizer: &mock.MarshalizerMock{}, + DataPools: createDataPoolsForShard(), + Uint64ByteSliceConverter: 
&mock.Uint64ByteSliceConverterMock{}, + DataPacker: &mock.DataPackerStub{}, + TriesContainer: createTriesHolderForShard(), + SizeCheckDelta: 0, + InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, } } diff --git a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go index e68b10d5e46..2682231a768 100644 --- a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go @@ -10,7 +10,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" @@ -20,9 +19,6 @@ import ( "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" - storageFactory "github.com/multiversx/mx-chain-go/storage/factory" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" ) const defaultBeforeGracefulClose = time.Minute @@ -239,46 +235,6 @@ func (brcf *baseRequestersContainerFactory) createMiniBlocksRequester(responseTo return mbRequester, nil } -func (brcf *baseRequestersContainerFactory) newImportDBTrieStorage( - mainStorer storage.Storer, - storageIdentifier dataRetriever.UnitType, - handler common.EnableEpochsHandler, - stateStatsHandler common.StateStatisticsHandler, -) (common.StorageManager, dataRetriever.TrieDataGetter, error) { - pathManager, err := storageFactory.CreatePathManager( - storageFactory.ArgCreatePathManager{ - WorkingDir: brcf.workingDir, - ChainID: brcf.chainID, - }, - ) - if err != nil { - return nil, nil, err - } - - trieFactoryArgs := trieFactory.TrieFactoryArgs{ - Marshalizer: brcf.marshalizer, - Hasher: brcf.hasher, - PathManager: pathManager, - TrieStorageManagerConfig: brcf.generalConfig.TrieStorageManagerConfig, - } - trieFactoryInstance, err := trieFactory.NewTrieFactory(trieFactoryArgs) - if err != nil { - return nil, nil, err - } - - args := trieFactory.TrieCreateArgs{ - MainStorer: mainStorer, - PruningEnabled: brcf.generalConfig.StateTriesConfig.AccountsStatePruningEnabled, - MaxTrieLevelInMem: brcf.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, - SnapshotsEnabled: brcf.snapshotsEnabled, - IdleProvider: disabled.NewProcessStatusHandler(), - Identifier: storageIdentifier.String(), - EnableEpochsHandler: handler, - StatsCollector: stateStatsHandler, - } - return trieFactoryInstance.Create(args) -} - func (brcf *baseRequestersContainerFactory) generatePeerAuthenticationRequester() error { identifierPeerAuth := common.PeerAuthenticationTopic peerAuthRequester := 
disabledRequesters.NewDisabledRequester() diff --git a/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go b/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go index a0d6963ad14..675ebd6f276 100644 --- a/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go +++ b/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go @@ -9,7 +9,7 @@ import ( ) // deltaEpochActive represents how many epochs behind the current computed epoch are to be considered "active" and -//cause the requests to be sent to all peers regardless of being full observers or not. Usually, a node will have +// cause the requests to be sent to all peers regardless of being full observers or not. Usually, a node will have // [config.toml].[StoragePruning].NumActivePersisters opened persisters but to the fact that a shorter epoch can happen, // that value is lowered at a maximum 1. const deltaEpochActive = uint32(1) diff --git a/dataRetriever/resolvers/trieNodeResolver.go b/dataRetriever/resolvers/trieNodeResolver.go index 871ed85fee5..275327d44c6 100644 --- a/dataRetriever/resolvers/trieNodeResolver.go +++ b/dataRetriever/resolvers/trieNodeResolver.go @@ -1,6 +1,8 @@ package resolvers import ( + "sync" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/batch" @@ -20,6 +22,7 @@ type ArgTrieNodeResolver struct { // TrieNodeResolver is a wrapper over Resolver that is specialized in resolving trie node requests type TrieNodeResolver struct { + mutCriticalSection sync.Mutex *baseResolver messageProcessor trieDataGetter dataRetriever.TrieDataGetter @@ -104,6 +107,9 @@ func (tnRes *TrieNodeResolver) resolveMultipleHashes(hashesBuff []byte, message } func (tnRes *TrieNodeResolver) resolveOnlyRequestedHashes(hashes [][]byte, nodes map[string]struct{}) (int, bool) { + tnRes.mutCriticalSection.Lock() + defer tnRes.mutCriticalSection.Unlock() + spaceUsed := 0 usedAllSpace := false remainingSpace := core.MaxBufferSizeToSendTrieNodes @@ -129,6 +135,9 @@ func (tnRes *TrieNodeResolver) resolveOnlyRequestedHashes(hashes [][]byte, nodes } func (tnRes *TrieNodeResolver) resolveSubTries(hashes [][]byte, nodes map[string]struct{}, spaceUsedAlready int) { + tnRes.mutCriticalSection.Lock() + defer tnRes.mutCriticalSection.Unlock() + var serializedNodes [][]byte var err error var serializedNode []byte @@ -168,7 +177,10 @@ func convertMapToSlice(m map[string]struct{}) [][]byte { } func (tnRes *TrieNodeResolver) resolveOneHash(hash []byte, chunkIndex uint32, message p2p.MessageP2P, source p2p.MessageHandler) error { + tnRes.mutCriticalSection.Lock() serializedNode, err := tnRes.trieDataGetter.GetSerializedNode(hash) + tnRes.mutCriticalSection.Unlock() + if err != nil { return err } diff --git a/dataRetriever/txpool/memorytests/memory_test.go b/dataRetriever/txpool/memorytests/memory_test.go index d2d48fbbcd5..a0484f016b8 100644 --- a/dataRetriever/txpool/memorytests/memory_test.go +++ b/dataRetriever/txpool/memorytests/memory_test.go @@ -36,25 +36,25 @@ func TestShardedTxPool_MemoryFootprint(t *testing.T) { journals = append(journals, runScenario(t, newScenario(200, 1, core.MegabyteSize, "0"), memoryAssertion{200, 200}, memoryAssertion{0, 1})) journals = append(journals, runScenario(t, newScenario(10, 1000, 20480, "0"), memoryAssertion{190, 205}, memoryAssertion{1, 4})) journals = append(journals, runScenario(t, newScenario(10000, 1, 1024, "0"), 
memoryAssertion{10, 16}, memoryAssertion{4, 10})) - journals = append(journals, runScenario(t, newScenario(1, 60000, 256, "0"), memoryAssertion{30, 36}, memoryAssertion{10, 16})) - journals = append(journals, runScenario(t, newScenario(10, 10000, 100, "0"), memoryAssertion{36, 46}, memoryAssertion{16, 24})) - journals = append(journals, runScenario(t, newScenario(100000, 1, 1024, "0"), memoryAssertion{120, 136}, memoryAssertion{56, 60})) + journals = append(journals, runScenario(t, newScenario(1, 60000, 256, "0"), memoryAssertion{30, 40}, memoryAssertion{10, 16})) + journals = append(journals, runScenario(t, newScenario(10, 10000, 100, "0"), memoryAssertion{36, 52}, memoryAssertion{16, 24})) + journals = append(journals, runScenario(t, newScenario(100000, 1, 1024, "0"), memoryAssertion{120, 138}, memoryAssertion{56, 60})) // With larger memory footprint - journals = append(journals, runScenario(t, newScenario(100000, 3, 650, "0"), memoryAssertion{290, 320}, memoryAssertion{95, 120})) - journals = append(journals, runScenario(t, newScenario(150000, 2, 650, "0"), memoryAssertion{290, 320}, memoryAssertion{120, 140})) - journals = append(journals, runScenario(t, newScenario(300000, 1, 650, "0"), memoryAssertion{290, 320}, memoryAssertion{170, 190})) - journals = append(journals, runScenario(t, newScenario(30, 10000, 650, "0"), memoryAssertion{290, 320}, memoryAssertion{60, 75})) - journals = append(journals, runScenario(t, newScenario(300, 1000, 650, "0"), memoryAssertion{290, 320}, memoryAssertion{60, 80})) + journals = append(journals, runScenario(t, newScenario(100000, 3, 650, "0"), memoryAssertion{290, 335}, memoryAssertion{95, 120})) + journals = append(journals, runScenario(t, newScenario(150000, 2, 650, "0"), memoryAssertion{290, 335}, memoryAssertion{120, 140})) + journals = append(journals, runScenario(t, newScenario(300000, 1, 650, "0"), memoryAssertion{290, 335}, memoryAssertion{170, 190})) + journals = append(journals, runScenario(t, newScenario(30, 10000, 650, "0"), memoryAssertion{290, 335}, memoryAssertion{60, 75})) + journals = append(journals, runScenario(t, newScenario(300, 1000, 650, "0"), memoryAssertion{290, 335}, memoryAssertion{60, 80})) // Scenarios where destination == me journals = append(journals, runScenario(t, newScenario(100, 1, core.MegabyteSize, "1_0"), memoryAssertion{90, 100}, memoryAssertion{0, 1})) journals = append(journals, runScenario(t, newScenario(10000, 1, 10240, "1_0"), memoryAssertion{96, 128}, memoryAssertion{0, 4})) - journals = append(journals, runScenario(t, newScenario(10, 10000, 1000, "1_0"), memoryAssertion{96, 136}, memoryAssertion{16, 25})) - journals = append(journals, runScenario(t, newScenario(150000, 1, 128, "1_0"), memoryAssertion{50, 75}, memoryAssertion{30, 40})) - journals = append(journals, runScenario(t, newScenario(1, 150000, 128, "1_0"), memoryAssertion{50, 75}, memoryAssertion{30, 40})) + journals = append(journals, runScenario(t, newScenario(10, 10000, 1000, "1_0"), memoryAssertion{96, 140}, memoryAssertion{16, 25})) + journals = append(journals, runScenario(t, newScenario(150000, 1, 128, "1_0"), memoryAssertion{50, 80}, memoryAssertion{30, 40})) + journals = append(journals, runScenario(t, newScenario(1, 150000, 128, "1_0"), memoryAssertion{50, 80}, memoryAssertion{30, 40})) for _, journal := range journals { journal.displayFootprintsSummary() diff --git a/debug/handler/interceptorDebugHandler.go b/debug/handler/interceptorDebugHandler.go index 9c5b2cb361a..a00f7b878b9 100644 --- a/debug/handler/interceptorDebugHandler.go +++ 
b/debug/handler/interceptorDebugHandler.go @@ -202,7 +202,7 @@ func (idh *interceptorDebugHandler) incrementNumOfPrints() { } } -//TODO replace this with a call to Query(search) when a suitable conditional parser will be used. Also replace config parameters +// TODO replace this with a call to Query(search) when a suitable conditional parser will be used. Also replace config parameters // with a query string so it will be more extensible func (idh *interceptorDebugHandler) getStringEvents(maxNumPrints int) []string { acceptEvent := func(ev *event) bool { diff --git a/docker/keygenerator/Dockerfile b/docker/keygenerator/Dockerfile index c66a732e629..a73d7951d42 100644 --- a/docker/keygenerator/Dockerfile +++ b/docker/keygenerator/Dockerfile @@ -12,5 +12,4 @@ RUN go build FROM ubuntu:22.04 COPY --from=builder /go/mx-chain-go/cmd/keygenerator /go/mx-chain-go/cmd/keygenerator -WORKDIR /go/mx-chain-go/cmd/keygenerator/ -ENTRYPOINT ["./keygenerator"] +ENTRYPOINT ["/go/mx-chain-go/cmd/keygenerator/keygenerator"] diff --git a/docker/node/Dockerfile b/docker/node/Dockerfile index cf6a8955c76..2a341a8409b 100644 --- a/docker/node/Dockerfile +++ b/docker/node/Dockerfile @@ -7,17 +7,24 @@ RUN go mod tidy # Multiversx node WORKDIR /go/mx-chain-go/cmd/node RUN go build -v -ldflags="-X main.appVersion=$(git describe --tags --long --dirty)" -RUN cp /go/pkg/mod/github.com/multiversx/$(cat /go/mx-chain-go/go.mod | grep mx-chain-vm-v | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g')/wasmer/libwasmer_linux_amd64.so /lib/libwasmer_linux_amd64.so -RUN cp /go/pkg/mod/github.com/multiversx/$(cat /go/mx-chain-go/go.mod | grep mx-chain-vm-go | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g')/wasmer2/libvmexeccapi.so /lib/libvmexeccapi.so -WORKDIR /go/mx-chain-go/cmd/node +RUN mkdir -p /lib_amd64 /lib_arm64 + +RUN cp /go/pkg/mod/github.com/multiversx/$(cat /go/mx-chain-go/go.mod | grep mx-chain-vm-v | sort -n | tail -n -1 | awk -F '/' '{print$3}' | sed 's/ /@/g')/wasmer/libwasmer_linux_amd64.so /lib_amd64/ +RUN cp /go/pkg/mod/github.com/multiversx/$(cat /go/mx-chain-go/go.mod | grep mx-chain-vm-go | sort -n | tail -n -1 | awk -F '/' '{print$3}' | sed 's/ /@/g')/wasmer2/libvmexeccapi.so /lib_amd64/ + +RUN cp /go/pkg/mod/github.com/multiversx/$(cat /go/mx-chain-go/go.mod | grep mx-chain-vm-v | sort -n | tail -n -1 | awk -F '/' '{print$3}' | sed 's/ /@/g')/wasmer/libwasmer_linux_arm64_shim.so /lib_arm64/ +RUN cp /go/pkg/mod/github.com/multiversx/$(cat /go/mx-chain-go/go.mod | grep mx-chain-vm-go | sort -n | tail -n -1 | awk -F '/' '{print$3}' | sed 's/ /@/g')/wasmer2/libvmexeccapi_arm.so /lib_arm64/ # ===== SECOND STAGE ====== FROM ubuntu:22.04 +ARG TARGETARCH RUN apt-get update && apt-get upgrade -y -COPY --from=builder "/go/mx-chain-go/cmd/node" "/go/mx-chain-go/cmd/node/" -COPY --from=builder "/lib/libwasmer_linux_amd64.so" "/lib/libwasmer_linux_amd64.so" -COPY --from=builder "/lib/libvmexeccapi.so" "/lib/libvmexeccapi.so" +COPY --from=builder "/go/mx-chain-go/cmd/node/node" "/go/mx-chain-go/cmd/node/" + +# Copy architecture-specific files +COPY --from=builder "/lib_${TARGETARCH}/*" "/lib/" + WORKDIR /go/mx-chain-go/cmd/node/ EXPOSE 8080 ENTRYPOINT ["/go/mx-chain-go/cmd/node/node"] diff --git a/docker/termui/Dockerfile b/docker/termui/Dockerfile index bcc670e3ce3..e22986033eb 100644 --- a/docker/termui/Dockerfile +++ b/docker/termui/Dockerfile @@ -4,11 +4,18 @@ WORKDIR /go/mx-chain-go COPY . 
. WORKDIR /go/mx-chain-go/cmd/termui RUN go build -v -RUN cp /go/pkg/mod/github.com/multiversx/$(cat /go/mx-chain-go/go.mod | grep mx-chain-vm-v | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g')/wasmer/libwasmer_linux_amd64.so /lib/libwasmer_linux_amd64.so +RUN mkdir -p /lib_amd64 /lib_arm64 +RUN cp /go/pkg/mod/github.com/multiversx/$(cat /go/mx-chain-go/go.mod | grep mx-chain-vm-v | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g')/wasmer/libwasmer_linux_amd64.so /lib_amd64/ +RUN cp /go/pkg/mod/github.com/multiversx/$(cat /go/mx-chain-go/go.mod | grep mx-chain-vm-v | sort -n | tail -n -1 | awk -F '/' '{print$3}' | sed 's/ /@/g')/wasmer/libwasmer_linux_arm64_shim.so /lib_arm64/ + # ===== SECOND STAGE ====== FROM ubuntu:22.04 +ARG TARGETARCH COPY --from=builder /go/mx-chain-go/cmd/termui /go/mx-chain-go/cmd/termui -COPY --from=builder "/lib/libwasmer_linux_amd64.so" "/lib/libwasmer_linux_amd64.so" + +# Copy architecture-specific files +COPY --from=builder "/lib_${TARGETARCH}/*" "/lib/" + WORKDIR /go/mx-chain-go/cmd/termui/ ENTRYPOINT ["./termui"] diff --git a/epochStart/bootstrap/baseStorageHandler.go b/epochStart/bootstrap/baseStorageHandler.go index dcf9193808d..1442af7e3b0 100644 --- a/epochStart/bootstrap/baseStorageHandler.go +++ b/epochStart/bootstrap/baseStorageHandler.go @@ -2,22 +2,65 @@ package bootstrap import ( "encoding/hex" - "encoding/json" "strings" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/typeConverters" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage" ) +// StorageHandlerArgs is a struct placeholder for all arguments required to create either a shard or a meta storage handler +type StorageHandlerArgs struct { + GeneralConfig config.Config + PreferencesConfig config.PreferencesConfig + ShardCoordinator sharding.Coordinator + PathManagerHandler storage.PathManagerHandler + Marshaller marshal.Marshalizer + Hasher hashing.Hasher + CurrentEpoch uint32 + Uint64Converter typeConverters.Uint64ByteSliceConverter + NodeTypeProvider NodeTypeProviderHandler + NodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory + SnapshotsEnabled bool + ManagedPeersHolder common.ManagedPeersHolder + NodeProcessingMode common.NodeProcessingMode + RepopulateTokensSupplies bool + StateStatsHandler common.StateStatisticsHandler +} + +func checkNilArgs(args StorageHandlerArgs) error { + if check.IfNil(args.ShardCoordinator) { + return core.ErrNilShardCoordinator + } + if check.IfNil(args.PathManagerHandler) { + return dataRetriever.ErrNilPathManager + } + if check.IfNil(args.Marshaller) { + return core.ErrNilMarshalizer + } + if check.IfNil(args.Hasher) { + return 
core.ErrNilHasher + } + if check.IfNil(args.Uint64Converter) { + return dataRetriever.ErrNilUint64ByteSliceConverter + } + if check.IfNil(args.NodesCoordinatorRegistryFactory) { + return nodesCoordinator.ErrNilNodesCoordinatorRegistryFactory + } + return nil +} + type miniBlocksInfo struct { miniBlockHashes [][]byte fullyProcessed []bool @@ -33,12 +76,13 @@ type processedIndexes struct { // baseStorageHandler handles the storage functions for saving bootstrap data type baseStorageHandler struct { - storageService dataRetriever.StorageService - shardCoordinator sharding.Coordinator - marshalizer marshal.Marshalizer - hasher hashing.Hasher - currentEpoch uint32 - uint64Converter typeConverters.Uint64ByteSliceConverter + storageService dataRetriever.StorageService + shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher + currentEpoch uint32 + uint64Converter typeConverters.Uint64ByteSliceConverter + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } func (bsh *baseStorageHandler) groupMiniBlocksByShard(miniBlocks map[string]*block.MiniBlock) ([]bootstrapStorage.PendingMiniBlocksInfo, error) { @@ -61,12 +105,11 @@ func (bsh *baseStorageHandler) groupMiniBlocksByShard(miniBlocks map[string]*blo func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry( metaBlock data.HeaderHandler, - nodesConfig *nodesCoordinator.NodesCoordinatorRegistry, + nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler, ) ([]byte, error) { key := append([]byte(common.NodesCoordinatorRegistryKeyPrefix), metaBlock.GetPrevRandSeed()...) - // TODO: replace hardcoded json - although it is hardcoded in nodesCoordinator as well. - registryBytes, err := json.Marshal(nodesConfig) + registryBytes, err := bsh.nodesCoordinatorRegistryFactory.GetRegistryData(nodesConfig, metaBlock.GetEpoch()) if err != nil { return nil, err } @@ -81,7 +124,7 @@ func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry( return nil, err } - log.Debug("saving nodes coordinator config", "key", key) + log.Debug("saving nodes coordinator config", "key", key, "epoch", metaBlock.GetEpoch()) return metaBlock.GetPrevRandSeed(), nil } diff --git a/epochStart/bootstrap/common.go b/epochStart/bootstrap/common.go index 19bfa2acc54..da6e99fda1b 100644 --- a/epochStart/bootstrap/common.go +++ b/epochStart/bootstrap/common.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) const baseErrorMessage = "error with epoch start bootstrapper arguments" @@ -119,6 +120,9 @@ func checkArguments(args ArgsEpochStartBootstrap) error { if check.IfNil(args.StateStatsHandler) { return fmt.Errorf("%s: %w", baseErrorMessage, statistics.ErrNilStateStatsHandler) } + if check.IfNil(args.NodesCoordinatorRegistryFactory) { + return fmt.Errorf("%s: %w", baseErrorMessage, nodesCoordinator.ErrNilNodesCoordinatorRegistryFactory) + } return nil } diff --git a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go index 066c9e32866..c44dae7ae48 100644 --- a/epochStart/bootstrap/disabled/disabledAccountsAdapter.go +++ b/epochStart/bootstrap/disabled/disabledAccountsAdapter.go @@ -86,12 +86,7 @@ func (a *accountsAdapter) RootHash() ([]byte, error) { } // RecreateTrie - -func (a *accountsAdapter) 
RecreateTrie(_ []byte) error { - return nil -} - -// RecreateTrieFromEpoch - -func (a *accountsAdapter) RecreateTrieFromEpoch(_ common.RootHashHolder) error { +func (a *accountsAdapter) RecreateTrie(_ common.RootHashHolder) error { return nil } diff --git a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go index 742fa1e0523..f7c1502d0c4 100644 --- a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go +++ b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go @@ -49,6 +49,16 @@ func (n *nodesCoordinator) GetAllWaitingValidatorsPublicKeys(_ uint32) (map[uint return nil, nil } +// GetAllShuffledOutValidatorsPublicKeys - +func (n *nodesCoordinator) GetAllShuffledOutValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { + return nil, nil +} + +// GetShuffledOutToAuctionValidatorsPublicKeys - +func (n *nodesCoordinator) GetShuffledOutToAuctionValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { + return nil, nil +} + // GetConsensusValidatorsPublicKeys - func (n *nodesCoordinator) GetConsensusValidatorsPublicKeys(_ []byte, _ uint64, _ uint32, _ uint32) ([]string, error) { return nil, nil diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go index d659989896b..0ebf8417d7b 100644 --- a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ -14,6 +14,7 @@ import ( disabledFactory "github.com/multiversx/mx-chain-go/factory/disabled" disabledGenesis "github.com/multiversx/mx-chain-go/genesis/process/disabled" "github.com/multiversx/mx-chain-go/process" + processDisabled "github.com/multiversx/mx-chain-go/process/disabled" "github.com/multiversx/mx-chain-go/process/factory/interceptorscontainer" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/storage/cache" @@ -108,6 +109,7 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) FullArchivePeerShardMapper: fullArchivePeerShardMapper, HardforkTrigger: hardforkTrigger, NodeOperationMode: args.NodeOperationMode, + RelayedTxV3Processor: processDisabled.NewRelayedTxV3Processor(), } interceptorsContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(containerFactoryArgs) diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go index b6dea44ee81..868d0359ef5 100644 --- a/epochStart/bootstrap/fromLocalStorage.go +++ b/epochStart/bootstrap/fromLocalStorage.go @@ -2,7 +2,6 @@ package bootstrap import ( "bytes" - "encoding/json" "fmt" "strconv" @@ -196,22 +195,22 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { func (e *epochStartBootstrap) checkIfShuffledOut( pubKey []byte, - nodesConfig *nodesCoordinator.NodesCoordinatorRegistry, + nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler, ) (uint32, bool) { epochIDasString := fmt.Sprint(e.baseData.lastEpoch) - epochConfig := nodesConfig.EpochsConfig[epochIDasString] + epochConfig := nodesConfig.GetEpochsConfig()[epochIDasString] if epochConfig == nil { return e.baseData.shardId, false } - newShardId, isWaitingForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.WaitingValidators) + newShardId, isWaitingForShard := checkIfPubkeyIsInMap(pubKey, 
epochConfig.GetWaitingValidators()) if isWaitingForShard { isShuffledOut := newShardId != e.baseData.shardId e.nodeType = core.NodeTypeValidator return newShardId, isShuffledOut } - newShardId, isEligibleForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.EligibleValidators) + newShardId, isEligibleForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.GetEligibleValidators()) if isEligibleForShard { isShuffledOut := newShardId != e.baseData.shardId e.nodeType = core.NodeTypeValidator @@ -252,7 +251,7 @@ func checkIfValidatorIsInList( return false } -func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*bootstrapStorage.BootstrapData, *nodesCoordinator.NodesCoordinatorRegistry, error) { +func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*bootstrapStorage.BootstrapData, nodesCoordinator.NodesCoordinatorRegistryHandler, error) { bootStorer, err := bootstrapStorage.NewBootstrapStorer(e.coreComponentsHolder.InternalMarshalizer(), storer) if err != nil { return nil, nil, err @@ -271,8 +270,7 @@ func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*boot return nil, nil, err } - config := &nodesCoordinator.NodesCoordinatorRegistry{} - err = json.Unmarshal(d, config) + config, err := e.nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry(d) if err != nil { return nil, nil, err } diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index e934e450f7c..bfc293032ee 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -13,7 +13,7 @@ import ( // StartOfEpochNodesConfigHandler defines the methods to process nodesConfig from epoch start metablocks type StartOfEpochNodesConfigHandler interface { - NodesConfigFromMetaBlock(currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler) (*nodesCoordinator.NodesCoordinatorRegistry, uint32, []*block.MiniBlock, error) + NodesConfigFromMetaBlock(currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler) (nodesCoordinator.NodesCoordinatorRegistryHandler, uint32, []*block.MiniBlock, error) IsInterfaceNil() bool } @@ -26,7 +26,7 @@ type EpochStartMetaBlockInterceptorProcessor interface { // StartInEpochNodesCoordinator defines the methods to process and save nodesCoordinator information to storage type StartInEpochNodesCoordinator interface { EpochStartPrepare(metaHdr data.HeaderHandler, body data.BodyHandler) - NodesCoordinatorToRegistry() *nodesCoordinator.NodesCoordinatorRegistry + NodesCoordinatorToRegistry(epoch uint32) nodesCoordinator.NodesCoordinatorRegistryHandler ShardIdForEpoch(epoch uint32) (uint32, error) IsInterfaceNil() bool } diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index 65e7e9c9237..01f65ccabe6 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -7,17 +7,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/typeConverters" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" 
"github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" ) @@ -26,36 +20,28 @@ type metaStorageHandler struct { } // NewMetaStorageHandler will return a new instance of metaStorageHandler -func NewMetaStorageHandler( - generalConfig config.Config, - prefsConfig config.PreferencesConfig, - shardCoordinator sharding.Coordinator, - pathManagerHandler storage.PathManagerHandler, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - currentEpoch uint32, - uint64Converter typeConverters.Uint64ByteSliceConverter, - nodeTypeProvider NodeTypeProviderHandler, - nodeProcessingMode common.NodeProcessingMode, - managedPeersHolder common.ManagedPeersHolder, - stateStatsHandler common.StateStatisticsHandler, -) (*metaStorageHandler, error) { +func NewMetaStorageHandler(args StorageHandlerArgs) (*metaStorageHandler, error) { + err := checkNilArgs(args) + if err != nil { + return nil, err + } + epochStartNotifier := &disabled.EpochStartNotifier{} storageFactory, err := factory.NewStorageServiceFactory( factory.StorageServiceFactoryArgs{ - Config: generalConfig, - PrefsConfig: prefsConfig, - ShardCoordinator: shardCoordinator, - PathManager: pathManagerHandler, + Config: args.GeneralConfig, + PrefsConfig: args.PreferencesConfig, + ShardCoordinator: args.ShardCoordinator, + PathManager: args.PathManagerHandler, EpochStartNotifier: epochStartNotifier, - NodeTypeProvider: nodeTypeProvider, - CurrentEpoch: currentEpoch, + NodeTypeProvider: args.NodeTypeProvider, StorageType: factory.BootstrapStorageService, + ManagedPeersHolder: args.ManagedPeersHolder, + CurrentEpoch: args.CurrentEpoch, CreateTrieEpochRootHashStorer: false, - NodeProcessingMode: nodeProcessingMode, - RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time - ManagedPeersHolder: managedPeersHolder, - StateStatsHandler: stateStatsHandler, + NodeProcessingMode: args.NodeProcessingMode, + RepopulateTokensSupplies: false, + StateStatsHandler: args.StateStatsHandler, }, ) if err != nil { @@ -68,12 +54,13 @@ func NewMetaStorageHandler( } base := &baseStorageHandler{ - storageService: storageService, - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - currentEpoch: currentEpoch, - uint64Converter: uint64Converter, + storageService: storageService, + shardCoordinator: args.ShardCoordinator, + marshalizer: args.Marshaller, + hasher: args.Hasher, + currentEpoch: args.CurrentEpoch, + uint64Converter: args.Uint64Converter, + nodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } return &metaStorageHandler{baseStorageHandler: base}, nil diff --git a/epochStart/bootstrap/metaStorageHandler_test.go b/epochStart/bootstrap/metaStorageHandler_test.go index 4fee7dee5b5..92603df176a 100644 --- a/epochStart/bootstrap/metaStorageHandler_test.go +++ b/epochStart/bootstrap/metaStorageHandler_test.go @@ -20,36 +20,37 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + 
"github.com/multiversx/mx-chain-go/testscommon/shardingMocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func createStorageHandlerArgs() StorageHandlerArgs { + return StorageHandlerArgs{ + GeneralConfig: testscommon.GetGeneralConfig(), + PreferencesConfig: config.PreferencesConfig{}, + ShardCoordinator: &mock.ShardCoordinatorStub{}, + PathManagerHandler: &testscommon.PathManagerStub{}, + Marshaller: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + CurrentEpoch: 0, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, + ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SnapshotsEnabled: false, + NodeProcessingMode: common.Normal, + StateStatsHandler: disabled.NewStateStatistics(), + RepopulateTokensSupplies: false, + } +} + func TestNewMetaStorageHandler_InvalidConfigErr(t *testing.T) { - gCfg := config.Config{} - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, err := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + args.GeneralConfig = config.Config{} + + mtStrHandler, err := NewMetaStorageHandler(args) assert.True(t, check.IfNil(mtStrHandler)) assert.NotNil(t, err) } @@ -59,29 +60,8 @@ func TestNewMetaStorageHandler_CreateForMetaErr(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - mtStrHandler, err := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, err := NewMetaStorageHandler(args) assert.False(t, check.IfNil(mtStrHandler)) assert.Nil(t, err) } @@ -91,34 +71,11 @@ func TestMetaStorageHandler_saveLastHeader(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( 
- gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) - + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) header := &block.MetaBlock{Nonce: 0} - headerHash, _ := core.CalculateHash(marshalizer, hasher, header) + headerHash, _ := core.CalculateHash(args.Marshaller, args.Hasher, header) expectedBootInfo := bootstrapStorage.BootstrapHeaderInfo{ ShardId: core.MetachainShardId, Hash: headerHash, } @@ -133,35 +90,13 @@ func TestMetaStorageHandler_saveLastCrossNotarizedHeaders(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) hdr1 := &block.Header{Nonce: 1} hdr2 := &block.Header{Nonce: 2} - hdrHash1, _ := core.CalculateHash(marshalizer, hasher, hdr1) - hdrHash2, _ := core.CalculateHash(marshalizer, hasher, hdr2) + hdrHash1, _ := core.CalculateHash(args.Marshaller, args.Hasher, hdr1) + hdrHash2, _ := core.CalculateHash(args.Marshaller, args.Hasher, hdr2) hdr3 := &block.MetaBlock{ Nonce: 3, @@ -181,30 +116,8 @@ func TestMetaStorageHandler_saveTriggerRegistry(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Nonce: 3}, @@ -220,30 +133,8 @@ func TestMetaStorageHandler_saveDataToStorage(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - 
managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Nonce: 3}, @@ -276,30 +167,8 @@ func testMetaWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) counter := 0 mtStrHandler.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 55a642a6793..dce9135e0a3 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -73,7 +73,7 @@ type Parameters struct { Epoch uint32 SelfShardId uint32 NumOfShards uint32 - NodesConfig *nodesCoordinator.NodesCoordinatorRegistry + NodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler } // ComponentsNeededForBootstrap holds the components which need to be initialized from network @@ -81,7 +81,7 @@ type ComponentsNeededForBootstrap struct { EpochStartMetaBlock data.MetaHeaderHandler PreviousEpochStart data.MetaHeaderHandler ShardHeader data.HeaderHandler - NodesConfig *nodesCoordinator.NodesCoordinatorRegistry + NodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler Headers map[string]data.HeaderHandler ShardCoordinator sharding.Coordinator PendingMiniBlocks map[string]*block.MiniBlock @@ -136,15 +136,17 @@ type epochStartBootstrap struct { storageOpenerHandler storage.UnitOpenerHandler latestStorageDataProvider storage.LatestStorageDataProviderHandler argumentsParser process.ArgumentsParser + dataSyncerFactory types.ScheduledDataSyncerCreator dataSyncerWithScheduled types.ScheduledDataSyncer storageService dataRetriever.StorageService + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory // gathered data epochStartMeta data.MetaHeaderHandler prevEpochStartMeta data.MetaHeaderHandler syncedHeaders map[string]data.HeaderHandler - nodesConfig *nodesCoordinator.NodesCoordinatorRegistry + nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler baseData baseDataInStorage startRound int64 nodeType core.NodeType @@ -163,30 +165,31 @@ type baseDataInStorage struct { // ArgsEpochStartBootstrap holds the arguments needed for creating an epoch start data provider component type ArgsEpochStartBootstrap struct { - CoreComponentsHolder process.CoreComponentsHolder - CryptoComponentsHolder process.CryptoComponentsHolder - DestinationShardAsObserver uint32 - MainMessenger p2p.Messenger - FullArchiveMessenger p2p.Messenger - GeneralConfig config.Config - PrefsConfig config.PreferencesConfig - FlagsConfig config.ContextFlagsConfig - EconomicsData process.EconomicsDataHandler - 
GenesisNodesConfig sharding.GenesisNodesSetupHandler - GenesisShardCoordinator sharding.Coordinator - StorageUnitOpener storage.UnitOpenerHandler - LatestStorageDataProvider storage.LatestStorageDataProviderHandler - Rater nodesCoordinator.ChanceComputer - NodeShuffler nodesCoordinator.NodesShuffler - RoundHandler epochStart.RoundHandler - ArgumentsParser process.ArgumentsParser - StatusHandler core.AppStatusHandler - HeaderIntegrityVerifier process.HeaderIntegrityVerifier - DataSyncerCreator types.ScheduledDataSyncerCreator - ScheduledSCRsStorer storage.Storer - TrieSyncStatisticsProvider common.SizeSyncStatisticsHandler - NodeProcessingMode common.NodeProcessingMode - StateStatsHandler common.StateStatisticsHandler + CoreComponentsHolder process.CoreComponentsHolder + CryptoComponentsHolder process.CryptoComponentsHolder + DestinationShardAsObserver uint32 + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + GeneralConfig config.Config + PrefsConfig config.PreferencesConfig + FlagsConfig config.ContextFlagsConfig + EconomicsData process.EconomicsDataHandler + GenesisNodesConfig sharding.GenesisNodesSetupHandler + GenesisShardCoordinator sharding.Coordinator + StorageUnitOpener storage.UnitOpenerHandler + LatestStorageDataProvider storage.LatestStorageDataProviderHandler + Rater nodesCoordinator.ChanceComputer + NodeShuffler nodesCoordinator.NodesShuffler + RoundHandler epochStart.RoundHandler + ArgumentsParser process.ArgumentsParser + StatusHandler core.AppStatusHandler + HeaderIntegrityVerifier process.HeaderIntegrityVerifier + DataSyncerCreator types.ScheduledDataSyncerCreator + ScheduledSCRsStorer storage.Storer + TrieSyncStatisticsProvider common.SizeSyncStatisticsHandler + NodeProcessingMode common.NodeProcessingMode + StateStatsHandler common.StateStatisticsHandler + NodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } type dataToSync struct { @@ -205,38 +208,40 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, } epochStartProvider := &epochStartBootstrap{ - coreComponentsHolder: args.CoreComponentsHolder, - cryptoComponentsHolder: args.CryptoComponentsHolder, - mainMessenger: args.MainMessenger, - fullArchiveMessenger: args.FullArchiveMessenger, - generalConfig: args.GeneralConfig, - prefsConfig: args.PrefsConfig, - flagsConfig: args.FlagsConfig, - economicsData: args.EconomicsData, - genesisNodesConfig: args.GenesisNodesConfig, - genesisShardCoordinator: args.GenesisShardCoordinator, - rater: args.Rater, - destinationShardAsObserver: args.DestinationShardAsObserver, - nodeShuffler: args.NodeShuffler, - roundHandler: args.RoundHandler, - storageOpenerHandler: args.StorageUnitOpener, - latestStorageDataProvider: args.LatestStorageDataProvider, - shuffledOut: false, - statusHandler: args.StatusHandler, - nodeType: core.NodeTypeObserver, - argumentsParser: args.ArgumentsParser, - headerIntegrityVerifier: args.HeaderIntegrityVerifier, - numConcurrentTrieSyncers: args.GeneralConfig.TrieSync.NumConcurrentTrieSyncers, - maxHardCapForMissingNodes: args.GeneralConfig.TrieSync.MaxHardCapForMissingNodes, - trieSyncerVersion: args.GeneralConfig.TrieSync.TrieSyncerVersion, - checkNodesOnDisk: args.GeneralConfig.TrieSync.CheckNodesOnDisk, - dataSyncerFactory: args.DataSyncerCreator, - storerScheduledSCRs: args.ScheduledSCRsStorer, - shardCoordinator: args.GenesisShardCoordinator, - trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, - nodeProcessingMode: args.NodeProcessingMode, - nodeOperationMode: 
common.NormalOperation, - stateStatsHandler: args.StateStatsHandler, + coreComponentsHolder: args.CoreComponentsHolder, + cryptoComponentsHolder: args.CryptoComponentsHolder, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + generalConfig: args.GeneralConfig, + prefsConfig: args.PrefsConfig, + flagsConfig: args.FlagsConfig, + economicsData: args.EconomicsData, + genesisNodesConfig: args.GenesisNodesConfig, + genesisShardCoordinator: args.GenesisShardCoordinator, + rater: args.Rater, + destinationShardAsObserver: args.DestinationShardAsObserver, + nodeShuffler: args.NodeShuffler, + roundHandler: args.RoundHandler, + storageOpenerHandler: args.StorageUnitOpener, + latestStorageDataProvider: args.LatestStorageDataProvider, + shuffledOut: false, + statusHandler: args.StatusHandler, + nodeType: core.NodeTypeObserver, + argumentsParser: args.ArgumentsParser, + headerIntegrityVerifier: args.HeaderIntegrityVerifier, + numConcurrentTrieSyncers: args.GeneralConfig.TrieSync.NumConcurrentTrieSyncers, + maxHardCapForMissingNodes: args.GeneralConfig.TrieSync.MaxHardCapForMissingNodes, + trieSyncerVersion: args.GeneralConfig.TrieSync.TrieSyncerVersion, + checkNodesOnDisk: args.GeneralConfig.TrieSync.CheckNodesOnDisk, + dataSyncerFactory: args.DataSyncerCreator, + storerScheduledSCRs: args.ScheduledSCRsStorer, + shardCoordinator: args.GenesisShardCoordinator, + trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, + nodeProcessingMode: args.NodeProcessingMode, + nodeOperationMode: common.NormalOperation, + stateStatsHandler: args.StateStatsHandler, + startEpoch: args.GeneralConfig.EpochStartConfig.GenesisEpoch, + nodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } if epochStartProvider.prefsConfig.FullArchive { @@ -754,19 +759,20 @@ func (e *epochStartBootstrap) processNodesConfig(pubKey []byte) ([]*block.MiniBl shardId = e.genesisShardCoordinator.SelfId() } argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ - DataPool: e.dataPool, - Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), - RequestHandler: e.requestHandler, - ChanceComputer: e.rater, - GenesisNodesConfig: e.genesisNodesConfig, - NodeShuffler: e.nodeShuffler, - Hasher: e.coreComponentsHolder.Hasher(), - PubKey: pubKey, - ShardIdAsObserver: shardId, - ChanNodeStop: e.coreComponentsHolder.ChanStopNodeProcess(), - NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), - IsFullArchive: e.prefsConfig.FullArchive, - EnableEpochsHandler: e.coreComponentsHolder.EnableEpochsHandler(), + DataPool: e.dataPool, + Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), + RequestHandler: e.requestHandler, + ChanceComputer: e.rater, + GenesisNodesConfig: e.genesisNodesConfig, + NodeShuffler: e.nodeShuffler, + Hasher: e.coreComponentsHolder.Hasher(), + PubKey: pubKey, + ShardIdAsObserver: shardId, + ChanNodeStop: e.coreComponentsHolder.ChanStopNodeProcess(), + NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), + IsFullArchive: e.prefsConfig.FullArchive, + EnableEpochsHandler: e.coreComponentsHolder.EnableEpochsHandler(), + NodesCoordinatorRegistryFactory: e.nodesCoordinatorRegistryFactory, } e.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) @@ -784,20 +790,22 @@ func (e *epochStartBootstrap) processNodesConfig(pubKey []byte) ([]*block.MiniBl func (e *epochStartBootstrap) requestAndProcessForMeta(peerMiniBlocks []*block.MiniBlock) error { var err error - storageHandlerComponent, err := NewMetaStorageHandler( - e.generalConfig, 
- e.prefsConfig, - e.shardCoordinator, - e.coreComponentsHolder.PathHandler(), - e.coreComponentsHolder.InternalMarshalizer(), - e.coreComponentsHolder.Hasher(), - e.epochStartMeta.GetEpoch(), - e.coreComponentsHolder.Uint64ByteSliceConverter(), - e.coreComponentsHolder.NodeTypeProvider(), - e.nodeProcessingMode, - e.cryptoComponentsHolder.ManagedPeersHolder(), - e.stateStatsHandler, - ) + argsStorageHandler := StorageHandlerArgs{ + GeneralConfig: e.generalConfig, + PreferencesConfig: e.prefsConfig, + ShardCoordinator: e.shardCoordinator, + PathManagerHandler: e.coreComponentsHolder.PathHandler(), + Marshaller: e.coreComponentsHolder.InternalMarshalizer(), + Hasher: e.coreComponentsHolder.Hasher(), + CurrentEpoch: e.epochStartMeta.GetEpoch(), + Uint64Converter: e.coreComponentsHolder.Uint64ByteSliceConverter(), + NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), + NodesCoordinatorRegistryFactory: e.nodesCoordinatorRegistryFactory, + ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), + NodeProcessingMode: e.nodeProcessingMode, + StateStatsHandler: e.stateStatsHandler, + } + storageHandlerComponent, err := NewMetaStorageHandler(argsStorageHandler) if err != nil { return err } @@ -954,20 +962,22 @@ func (e *epochStartBootstrap) requestAndProcessForShard(peerMiniBlocks []*block. e.syncedHeaders[hash] = hdr } - storageHandlerComponent, err := NewShardStorageHandler( - e.generalConfig, - e.prefsConfig, - e.shardCoordinator, - e.coreComponentsHolder.PathHandler(), - e.coreComponentsHolder.InternalMarshalizer(), - e.coreComponentsHolder.Hasher(), - e.baseData.lastEpoch, - e.coreComponentsHolder.Uint64ByteSliceConverter(), - e.coreComponentsHolder.NodeTypeProvider(), - e.nodeProcessingMode, - e.cryptoComponentsHolder.ManagedPeersHolder(), - e.stateStatsHandler, - ) + argsStorageHandler := StorageHandlerArgs{ + GeneralConfig: e.generalConfig, + PreferencesConfig: e.prefsConfig, + ShardCoordinator: e.shardCoordinator, + PathManagerHandler: e.coreComponentsHolder.PathHandler(), + Marshaller: e.coreComponentsHolder.InternalMarshalizer(), + Hasher: e.coreComponentsHolder.Hasher(), + CurrentEpoch: e.baseData.lastEpoch, + Uint64Converter: e.coreComponentsHolder.Uint64ByteSliceConverter(), + NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), + NodesCoordinatorRegistryFactory: e.nodesCoordinatorRegistryFactory, + ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), + NodeProcessingMode: e.nodeProcessingMode, + StateStatsHandler: e.stateStatsHandler, + } + storageHandlerComponent, err := NewShardStorageHandler(argsStorageHandler) if err != nil { return err } @@ -1220,22 +1230,23 @@ func (e *epochStartBootstrap) createResolversContainer() error { // this one should only be used before determining the correct shard where the node should reside log.Debug("epochStartBootstrap.createRequestHandler", "shard", e.shardCoordinator.SelfId()) resolversContainerArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: e.shardCoordinator, - MainMessenger: e.mainMessenger, - FullArchiveMessenger: e.fullArchiveMessenger, - Store: storageService, - Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), - DataPools: e.dataPool, - Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), - NumConcurrentResolvingJobs: 10, - DataPacker: dataPacker, - TriesContainer: e.trieContainer, - SizeCheckDelta: 0, - InputAntifloodHandler: disabled.NewAntiFloodHandler(), - OutputAntifloodHandler: disabled.NewAntiFloodHandler(), - MainPreferredPeersHolder: 
disabled.NewPreferredPeersHolder(), - FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), - PayloadValidator: payloadValidator, + ShardCoordinator: e.shardCoordinator, + MainMessenger: e.mainMessenger, + FullArchiveMessenger: e.fullArchiveMessenger, + Store: storageService, + Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), + DataPools: e.dataPool, + Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + DataPacker: dataPacker, + TriesContainer: e.trieContainer, + SizeCheckDelta: 0, + InputAntifloodHandler: disabled.NewAntiFloodHandler(), + OutputAntifloodHandler: disabled.NewAntiFloodHandler(), + MainPreferredPeersHolder: disabled.NewPreferredPeersHolder(), + FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), + PayloadValidator: payloadValidator, } resolverFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs) if err != nil { diff --git a/epochStart/bootstrap/process_test.go b/epochStart/bootstrap/process_test.go index d95d97282d5..552148003d6 100644 --- a/epochStart/bootstrap/process_test.go +++ b/epochStart/bootstrap/process_test.go @@ -41,6 +41,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" @@ -85,7 +86,14 @@ func createComponentsForEpochStart() (*mock.CoreComponentsMock, *mock.CryptoComp NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, ProcessStatusHandlerInstance: &testscommon.ProcessStatusHandlerStub{}, HardforkTriggerPubKeyField: []byte("provided hardfork pub key"), - EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.StakingV4Step2Flag { + return 99999 + } + return 0 + }, + }, }, &mock.CryptoComponentsMock{ PubKey: &cryptoMocks.PublicKeyStub{}, @@ -111,9 +119,9 @@ func createMockEpochStartBootstrapArgs( MainMessenger: &p2pmocks.MessengerStub{ ConnectedPeersCalled: func() []core.PeerID { return []core.PeerID{"peer0", "peer1", "peer2", "peer3", "peer4", "peer5"} - }, - }, - FullArchiveMessenger: &p2pmocks.MessengerStub{}, + }}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, GeneralConfig: config.Config{ MiniBlocksStorage: generalCfg.MiniBlocksStorage, PeerBlockBodyStorage: generalCfg.PeerBlockBodyStorage, @@ -205,7 +213,7 @@ func createMockEpochStartBootstrapArgs( return 1 }, }, - GenesisNodesConfig: &mock.NodesSetupStub{}, + GenesisNodesConfig: &genesisMocks.NodesSetupStub{}, GenesisShardCoordinator: mock.NewMultipleShardsCoordinatorMock(), Rater: &mock.RaterStub{}, DestinationShardAsObserver: 0, @@ -213,7 +221,7 @@ func createMockEpochStartBootstrapArgs( RoundHandler: &mock.RoundHandlerStub{}, LatestStorageDataProvider: &mock.LatestStorageDataProviderStub{}, StorageUnitOpener: 
&storageMocks.UnitOpenerStub{}, - ArgumentsParser: &mock.ArgumentParserMock{}, + ArgumentsParser: &testscommon.ArgumentParserMock{}, StatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, DataSyncerCreator: &scheduledDataSyncer.ScheduledSyncerFactoryStub{ @@ -794,7 +802,7 @@ func TestIsStartInEpochZero(t *testing.T) { coreComp, cryptoComp := createComponentsForEpochStart() args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp) - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetStartTimeCalled: func() int64 { return 1000 }, @@ -828,7 +836,7 @@ func TestEpochStartBootstrap_BootstrapShouldStartBootstrapProcess(t *testing.T) roundDuration := uint64(60000) coreComp, cryptoComp := createComponentsForEpochStart() args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp) - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return roundDuration }, @@ -887,7 +895,7 @@ func TestPrepareForEpochZero_NodeInGenesisShouldNotAlterShardID(t *testing.T) { } args.DestinationShardAsObserver = uint32(7) - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { eligibleMap := map[uint32][]nodesCoordinator.GenesisNodeInfoHandler{ 1: {mock.NewNodeInfo([]byte("addr"), []byte("pubKey11"), 1, initRating)}, @@ -922,7 +930,7 @@ func TestPrepareForEpochZero_NodeNotInGenesisShouldAlterShardID(t *testing.T) { }, } args.DestinationShardAsObserver = desiredShardAsObserver - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { eligibleMap := map[uint32][]nodesCoordinator.GenesisNodeInfoHandler{ 1: {mock.NewNodeInfo([]byte("addr"), []byte("pubKey11"), 1, initRating)}, @@ -1487,7 +1495,7 @@ func getNodesConfigMock(numOfShards uint32) sharding.GenesisNodesSetupHandler { roundDurationMillis := 4000 epochDurationMillis := 50 * int64(roundDurationMillis) - nodesConfig := &mock.NodesSetupStub{ + nodesConfig := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) for i := uint32(0); i < numOfShards; i++ { diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index 881aedf74c2..49535a7228c 100644 --- a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -10,17 +10,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/typeConverters" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" 
"github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -30,36 +24,28 @@ type shardStorageHandler struct { } // NewShardStorageHandler will return a new instance of shardStorageHandler -func NewShardStorageHandler( - generalConfig config.Config, - prefsConfig config.PreferencesConfig, - shardCoordinator sharding.Coordinator, - pathManagerHandler storage.PathManagerHandler, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - currentEpoch uint32, - uint64Converter typeConverters.Uint64ByteSliceConverter, - nodeTypeProvider core.NodeTypeProviderHandler, - nodeProcessingMode common.NodeProcessingMode, - managedPeersHolder common.ManagedPeersHolder, - stateStatsHandler common.StateStatisticsHandler, -) (*shardStorageHandler, error) { +func NewShardStorageHandler(args StorageHandlerArgs) (*shardStorageHandler, error) { + err := checkNilArgs(args) + if err != nil { + return nil, err + } + epochStartNotifier := &disabled.EpochStartNotifier{} storageFactory, err := factory.NewStorageServiceFactory( factory.StorageServiceFactoryArgs{ - Config: generalConfig, - PrefsConfig: prefsConfig, - ShardCoordinator: shardCoordinator, - PathManager: pathManagerHandler, + Config: args.GeneralConfig, + PrefsConfig: args.PreferencesConfig, + ShardCoordinator: args.ShardCoordinator, + PathManager: args.PathManagerHandler, EpochStartNotifier: epochStartNotifier, - NodeTypeProvider: nodeTypeProvider, - CurrentEpoch: currentEpoch, + NodeTypeProvider: args.NodeTypeProvider, StorageType: factory.BootstrapStorageService, + ManagedPeersHolder: args.ManagedPeersHolder, + CurrentEpoch: args.CurrentEpoch, CreateTrieEpochRootHashStorer: false, - NodeProcessingMode: nodeProcessingMode, + NodeProcessingMode: args.NodeProcessingMode, RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time - ManagedPeersHolder: managedPeersHolder, - StateStatsHandler: stateStatsHandler, + StateStatsHandler: args.StateStatsHandler, }, ) if err != nil { @@ -72,12 +58,13 @@ func NewShardStorageHandler( } base := &baseStorageHandler{ - storageService: storageService, - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - currentEpoch: currentEpoch, - uint64Converter: uint64Converter, + storageService: storageService, + shardCoordinator: args.ShardCoordinator, + marshalizer: args.Marshaller, + hasher: args.Hasher, + currentEpoch: args.CurrentEpoch, + uint64Converter: args.Uint64Converter, + nodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } return &shardStorageHandler{baseStorageHandler: base}, nil @@ -123,7 +110,7 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededFo return err } - components.NodesConfig.CurrentEpoch = components.ShardHeader.GetEpoch() + components.NodesConfig.SetCurrentEpoch(components.ShardHeader.GetEpoch()) nodesCoordinatorConfigKey, err := ssh.saveNodesCoordinatorRegistry(components.EpochStartMetaBlock, components.NodesConfig) if err != nil { return err diff --git 
a/epochStart/bootstrap/shardStorageHandler_test.go b/epochStart/bootstrap/shardStorageHandler_test.go index b27f13df28b..018bc4b99b8 100644 --- a/epochStart/bootstrap/shardStorageHandler_test.go +++ b/epochStart/bootstrap/shardStorageHandler_test.go @@ -13,24 +13,13 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/typeConverters" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/statistics/disabled" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-go/testscommon" epochStartMocks "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks/epochStart" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" - "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -41,21 +30,8 @@ func TestNewShardStorageHandler_ShouldWork(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, err := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, err := NewShardStorageHandler(args) assert.False(t, check.IfNil(shardStorage)) assert.Nil(t, err) @@ -66,21 +42,8 @@ func TestShardStorageHandler_SaveDataToStorageShardDataNotFound(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Epoch: 1}, @@ -97,21 +60,8 @@ func TestShardStorageHandler_SaveDataToStorageMissingHeader(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - 
args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{ @@ -151,21 +101,8 @@ func testShardWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber }() counter := 0 - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shardStorage.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { counter++ @@ -206,21 +143,8 @@ func TestShardStorageHandler_SaveDataToStorage(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) hash1 := []byte("hash1") hdr1 := block.MetaBlock{ @@ -318,21 +242,8 @@ func TestShardStorageHandler_getCrossProcessedMiniBlockHeadersDestMe(t *testing. mbs := append(intraMbs, crossMbs...) - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shardHeader := &block.Header{ Nonce: 100, MiniBlockHeaders: mbs, @@ -351,21 +262,8 @@ func TestShardStorageHandler_getCrossProcessedMiniBlockHeadersDestMe(t *testing. 
func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledErrorGettingProcessedAndPendingMbs(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{}, @@ -382,21 +280,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledErrorG func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledNoScheduled(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, scenario.shardHeader, false) @@ -410,21 +295,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledNoSche func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledWrongHeaderType(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() wrongShardHeader := &block.MetaBlock{} @@ -445,21 +317,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledWrongH func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduled(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, scenario.shardHeader, true) @@ -626,21 +485,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksErrorGettingEpochSt _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, 
- 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{}, @@ -662,21 +508,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksMissingHeader(t *te }() lastFinishedMetaBlock := "last finished meta block" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{ @@ -701,21 +534,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWrongHeader(t *test lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) meta := &block.MetaBlock{ @@ -745,21 +565,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNilMetaBlock(t *tes lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) meta := &block.MetaBlock{ @@ -791,21 +598,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNoProcessedNoPendin lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) 
lastFinishedHeaders[0].PendingMiniBlockHeaders = nil @@ -833,21 +627,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNoProcessedNoPendin func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithProcessedAndPendingMbs(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, firstPendingMetaBlockHash, err := shardStorage.getProcessedAndPendingMiniBlocks(scenario.metaBlock, scenario.headers) @@ -864,21 +645,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledGetSha _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) headers := map[string]data.HeaderHandler{} meta := &block.MetaBlock{ @@ -898,21 +666,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledMissin _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -940,21 +695,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledWrongT _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -984,26 +726,12 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledErrorW _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() + args := createStorageHandlerArgs() expectedErr := fmt.Errorf("expected error") - // Simulate an error when writing to storage with a mock marshaller - args.marshalizer = &marshallerMock.MarshalizerStub{MarshalCalled: func(obj interface{}) ([]byte, error) { + args.Marshaller = &marshallerMock.MarshalizerStub{MarshalCalled: func(obj interface{}) ([]byte, error) { return nil, expectedErr }} - shardStorage, _ := 
NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1033,21 +761,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduled(t *te _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1082,21 +797,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduledErrorUpda _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1125,21 +827,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduled(t *testi _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" prevMetaHash := "prev metaHlock hash" @@ -1351,36 +1040,6 @@ func Test_getShardHeaderAndMetaHashes(t *testing.T) { require.Equal(t, metaHashes, headers[shardHdrKey].(data.ShardHeaderHandler).GetMetaBlockHashes()) } -type shardStorageArgs struct { - generalConfig config.Config - prefsConfig config.PreferencesConfig - shardCoordinator sharding.Coordinator - pathManagerHandler storage.PathManagerHandler - marshalizer marshal.Marshalizer - hasher hashing.Hasher - currentEpoch uint32 - uint64Converter typeConverters.Uint64ByteSliceConverter - nodeTypeProvider core.NodeTypeProviderHandler - nodeProcessingMode common.NodeProcessingMode - managedPeersHolder common.ManagedPeersHolder -} - -func createDefaultShardStorageArgs() shardStorageArgs { - return shardStorageArgs{ - generalConfig: testscommon.GetGeneralConfig(), - prefsConfig: config.PreferencesConfig{}, - shardCoordinator: &mock.ShardCoordinatorStub{}, - pathManagerHandler: &testscommon.PathManagerStub{}, - marshalizer: &mock.MarshalizerMock{}, - hasher: &hashingMocks.HasherMock{}, - currentEpoch: 0, - 
uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - nodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - nodeProcessingMode: common.Normal, - managedPeersHolder: &testscommon.ManagedPeersHolderStub{}, - } -} - func createDefaultEpochStartShardData(lastFinishedMetaBlockHash []byte, shardHeaderHash []byte) []block.EpochStartShardData { return []block.EpochStartShardData{ { @@ -1451,7 +1110,6 @@ func createPendingAndProcessedMiniBlocksScenario() scenarioData { expectedPendingMbsWithScheduled := []bootstrapStorage.PendingMiniBlocksInfo{ {ShardID: 0, MiniBlocksHashes: [][]byte{crossMbHeaders[1].Hash, crossMbHeaders[2].Hash, crossMbHeaders[3].Hash, crossMbHeaders[4].Hash, crossMbHeaders[0].Hash}}, } - expectedProcessedMbsWithScheduled := make([]bootstrapStorage.MiniBlocksInMeta, 0) headers := map[string]data.HeaderHandler{ lastFinishedMetaBlockHash: &block.MetaBlock{ @@ -1492,7 +1150,7 @@ func createPendingAndProcessedMiniBlocksScenario() scenarioData { expectedPendingMbs: expectedPendingMiniBlocks, expectedProcessedMbs: expectedProcessedMiniBlocks, expectedPendingMbsWithScheduled: expectedPendingMbsWithScheduled, - expectedProcessedMbsWithScheduled: expectedProcessedMbsWithScheduled, + expectedProcessedMbsWithScheduled: []bootstrapStorage.MiniBlocksInMeta{}, } } diff --git a/epochStart/bootstrap/storageProcess.go b/epochStart/bootstrap/storageProcess.go index 0f87b3626e7..809b0dfbb8b 100644 --- a/epochStart/bootstrap/storageProcess.go +++ b/epochStart/bootstrap/storageProcess.go @@ -404,19 +404,20 @@ func (sesb *storageEpochStartBootstrap) processNodesConfig(pubKey []byte) error shardId = sesb.genesisShardCoordinator.SelfId() } argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ - DataPool: sesb.dataPool, - Marshalizer: sesb.coreComponentsHolder.InternalMarshalizer(), - RequestHandler: sesb.requestHandler, - ChanceComputer: sesb.rater, - GenesisNodesConfig: sesb.genesisNodesConfig, - NodeShuffler: sesb.nodeShuffler, - Hasher: sesb.coreComponentsHolder.Hasher(), - PubKey: pubKey, - ShardIdAsObserver: shardId, - ChanNodeStop: sesb.coreComponentsHolder.ChanStopNodeProcess(), - NodeTypeProvider: sesb.coreComponentsHolder.NodeTypeProvider(), - IsFullArchive: sesb.prefsConfig.FullArchive, - EnableEpochsHandler: sesb.coreComponentsHolder.EnableEpochsHandler(), + DataPool: sesb.dataPool, + Marshalizer: sesb.coreComponentsHolder.InternalMarshalizer(), + RequestHandler: sesb.requestHandler, + ChanceComputer: sesb.rater, + GenesisNodesConfig: sesb.genesisNodesConfig, + NodeShuffler: sesb.nodeShuffler, + Hasher: sesb.coreComponentsHolder.Hasher(), + PubKey: pubKey, + ShardIdAsObserver: shardId, + ChanNodeStop: sesb.coreComponentsHolder.ChanStopNodeProcess(), + NodeTypeProvider: sesb.coreComponentsHolder.NodeTypeProvider(), + IsFullArchive: sesb.prefsConfig.FullArchive, + EnableEpochsHandler: sesb.coreComponentsHolder.EnableEpochsHandler(), + NodesCoordinatorRegistryFactory: sesb.nodesCoordinatorRegistryFactory, } sesb.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) if err != nil { diff --git a/epochStart/bootstrap/storageProcess_test.go b/epochStart/bootstrap/storageProcess_test.go index 78288156144..a59b0d125f2 100644 --- a/epochStart/bootstrap/storageProcess_test.go +++ b/epochStart/bootstrap/storageProcess_test.go @@ -22,6 +22,7 @@ import ( epochStartMocks "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks/epochStart" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" 
"github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/stretchr/testify/assert" ) @@ -92,7 +93,7 @@ func TestStorageEpochStartBootstrap_BootstrapFromGenesis(t *testing.T) { return 1 }, } - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return roundDuration }, @@ -116,7 +117,7 @@ func TestStorageEpochStartBootstrap_BootstrapMetablockNotFound(t *testing.T) { return 1 }, } - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return roundDuration }, diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index 0a74d4151fb..0bcb9308311 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -38,19 +38,20 @@ type syncValidatorStatus struct { // ArgsNewSyncValidatorStatus holds the arguments needed for creating a new validator status process component type ArgsNewSyncValidatorStatus struct { - DataPool dataRetriever.PoolsHolder - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - RequestHandler process.RequestHandler - ChanceComputer nodesCoordinator.ChanceComputer - GenesisNodesConfig sharding.GenesisNodesSetupHandler - NodeShuffler nodesCoordinator.NodesShuffler - PubKey []byte - ShardIdAsObserver uint32 - ChanNodeStop chan endProcess.ArgEndProcess - NodeTypeProvider NodeTypeProviderHandler - IsFullArchive bool - EnableEpochsHandler common.EnableEpochsHandler + DataPool dataRetriever.PoolsHolder + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + RequestHandler process.RequestHandler + ChanceComputer nodesCoordinator.ChanceComputer + GenesisNodesConfig sharding.GenesisNodesSetupHandler + NodeShuffler nodesCoordinator.NodesShuffler + PubKey []byte + ShardIdAsObserver uint32 + ChanNodeStop chan endProcess.ArgEndProcess + NodeTypeProvider NodeTypeProviderHandler + IsFullArchive bool + EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } // NewSyncValidatorStatus creates a new validator status process component @@ -111,26 +112,27 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat s.memDB = disabled.CreateMemUnit() argsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: int(args.GenesisNodesConfig.GetShardConsensusGroupSize()), - MetaConsensusGroupSize: int(args.GenesisNodesConfig.GetMetaConsensusGroupSize()), - Marshalizer: args.Marshalizer, - Hasher: args.Hasher, - Shuffler: args.NodeShuffler, - EpochStartNotifier: &disabled.EpochStartNotifier{}, - BootStorer: s.memDB, - ShardIDAsObserver: args.ShardIdAsObserver, - NbShards: args.GenesisNodesConfig.NumberOfShards(), - EligibleNodes: eligibleValidators, - WaitingNodes: waitingValidators, - SelfPublicKey: args.PubKey, - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: disabled.NewShuffledOutHandler(), - ChanStopNode: args.ChanNodeStop, - NodeTypeProvider: args.NodeTypeProvider, - IsFullArchive: args.IsFullArchive, - EnableEpochsHandler: args.EnableEpochsHandler, - ValidatorInfoCacher: s.dataPool.CurrentEpochValidatorInfo(), - GenesisNodesSetupHandler: s.genesisNodesConfig, + ShardConsensusGroupSize: int(args.GenesisNodesConfig.GetShardConsensusGroupSize()), 
+ MetaConsensusGroupSize: int(args.GenesisNodesConfig.GetMetaConsensusGroupSize()), + Marshalizer: args.Marshalizer, + Hasher: args.Hasher, + Shuffler: args.NodeShuffler, + EpochStartNotifier: &disabled.EpochStartNotifier{}, + BootStorer: s.memDB, + ShardIDAsObserver: args.ShardIdAsObserver, + NbShards: args.GenesisNodesConfig.NumberOfShards(), + EligibleNodes: eligibleValidators, + WaitingNodes: waitingValidators, + SelfPublicKey: args.PubKey, + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: disabled.NewShuffledOutHandler(), + ChanStopNode: args.ChanNodeStop, + NodeTypeProvider: args.NodeTypeProvider, + IsFullArchive: args.IsFullArchive, + EnableEpochsHandler: args.EnableEpochsHandler, + ValidatorInfoCacher: s.dataPool.CurrentEpochValidatorInfo(), + GenesisNodesSetupHandler: s.genesisNodesConfig, + NodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } baseNodesCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argsNodesCoordinator) if err != nil { @@ -151,7 +153,7 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat func (s *syncValidatorStatus) NodesConfigFromMetaBlock( currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler, -) (*nodesCoordinator.NodesCoordinatorRegistry, uint32, []*block.MiniBlock, error) { +) (nodesCoordinator.NodesCoordinatorRegistryHandler, uint32, []*block.MiniBlock, error) { if currMetaBlock.GetNonce() > 1 && !currMetaBlock.IsStartOfEpochBlock() { return nil, 0, nil, epochStart.ErrNotEpochStartBlock } @@ -177,8 +179,8 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock( return nil, 0, nil, err } - nodesConfig := s.nodeCoordinator.NodesCoordinatorToRegistry() - nodesConfig.CurrentEpoch = currMetaBlock.GetEpoch() + nodesConfig := s.nodeCoordinator.NodesCoordinatorToRegistry(currMetaBlock.GetEpoch()) + nodesConfig.SetCurrentEpoch(currMetaBlock.GetEpoch()) return nodesConfig, selfShardId, allMiniblocks, nil } diff --git a/epochStart/bootstrap/syncValidatorStatus_test.go b/epochStart/bootstrap/syncValidatorStatus_test.go index f7e409af875..7cfe6061c77 100644 --- a/epochStart/bootstrap/syncValidatorStatus_test.go +++ b/epochStart/bootstrap/syncValidatorStatus_test.go @@ -17,6 +17,7 @@ import ( epochStartMocks "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks/epochStart" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -246,6 +247,11 @@ func TestSyncValidatorStatus_getPeerBlockBodyForMeta(t *testing.T) { } func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &mock.MarshalizerMock{}, + 444, + ) + return ArgsNewSyncValidatorStatus{ DataPool: &dataRetrieverMock.PoolsHolderStub{ MiniBlocksCalled: func() storage.Cacher { @@ -259,7 +265,7 @@ func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { Hasher: &hashingMocks.HasherMock{}, RequestHandler: &testscommon.RequestHandlerStub{}, ChanceComputer: &shardingMocks.NodesCoordinatorStub{}, - GenesisNodesConfig: &mock.NodesSetupStub{ + 
GenesisNodesConfig: &genesisMocks.NodesSetupStub{ NumberOfShardsCalled: func() uint32 { return 1 }, @@ -301,12 +307,13 @@ func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { return 2 }, }, - NodeShuffler: &shardingMocks.NodeShufflerMock{}, - PubKey: []byte("public key"), - ShardIdAsObserver: 0, - ChanNodeStop: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + NodeShuffler: &shardingMocks.NodeShufflerMock{}, + PubKey: []byte("public key"), + ShardIdAsObserver: 0, + ChanNodeStop: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } } diff --git a/epochStart/dtos.go b/epochStart/dtos.go new file mode 100644 index 00000000000..ecac1a4217f --- /dev/null +++ b/epochStart/dtos.go @@ -0,0 +1,24 @@ +package epochStart + +import ( + "math/big" + + "github.com/multiversx/mx-chain-go/state" +) + +// OwnerData is a struct containing relevant information about owner's nodes data +type OwnerData struct { + NumStakedNodes int64 + NumActiveNodes int64 + TotalTopUp *big.Int + TopUpPerNode *big.Int + AuctionList []state.ValidatorInfoHandler + Qualified bool +} + +// ValidatorStatsInEpoch holds validator stats in an epoch +type ValidatorStatsInEpoch struct { + Eligible map[uint32]int + Waiting map[uint32]int + Leaving map[uint32]int +} diff --git a/epochStart/errors.go b/epochStart/errors.go index 3f705f585fd..ca115e939f4 100644 --- a/epochStart/errors.go +++ b/epochStart/errors.go @@ -281,6 +281,9 @@ var ErrSystemValidatorSCCall = errors.New("system validator sc call failed") // ErrOwnerDoesntHaveEligibleNodesInEpoch signals that the owner doesn't have any eligible nodes in epoch var ErrOwnerDoesntHaveEligibleNodesInEpoch = errors.New("owner has no eligible nodes in epoch") +// ErrOwnerDoesntHaveNodesInEpoch signals that the owner has no nodes in epoch +var ErrOwnerDoesntHaveNodesInEpoch = errors.New("owner has no nodes in epoch") + // ErrInvalidMaxHardCapForMissingNodes signals that the maximum hardcap value for missing nodes is invalid var ErrInvalidMaxHardCapForMissingNodes = errors.New("invalid max hardcap for missing nodes") @@ -331,3 +334,21 @@ var ErrNilManagedPeersHolder = errors.New("nil managed peers holder") // ErrNilExecutionOrderHandler signals that a nil execution order handler has been provided var ErrNilExecutionOrderHandler = errors.New("nil execution order handler") + +// ErrReceivedNewListNodeInStakingV4 signals that a new node has been assigned in common.NewList instead of common.AuctionList after staking v4 +var ErrReceivedNewListNodeInStakingV4 = errors.New("new node has been assigned in common.NewList instead of common.AuctionList after staking v4") + +// ErrNilMaxNodesChangeConfigProvider signals that a nil nodes config provider has been provided +var ErrNilMaxNodesChangeConfigProvider = errors.New("nil nodes config provider has been provided") + +// ErrNilAuctionListSelector signals that a nil auction list selector has been provided +var ErrNilAuctionListSelector = errors.New("nil auction list selector has been provided") + +// ErrOwnerHasNoStakedNode signals that the owner has no staked node +var ErrOwnerHasNoStakedNode = errors.New("owner has no staked node") + +// 
ErrUint32SubtractionOverflow signals uint32 subtraction overflowed +var ErrUint32SubtractionOverflow = errors.New("uint32 subtraction overflowed") + +// ErrReceivedAuctionValidatorsBeforeStakingV4 signals that an auction node has been provided before enabling staking v4 +var ErrReceivedAuctionValidatorsBeforeStakingV4 = errors.New("auction node has been provided before enabling staking v4") diff --git a/epochStart/interface.go b/epochStart/interface.go index fc4364afc43..37df49df292 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -85,14 +86,6 @@ type Notifier interface { IsInterfaceNil() bool } -// ValidatorStatisticsProcessorHandler defines the actions for processing validator statistics -// needed in the epoch events -type ValidatorStatisticsProcessorHandler interface { - Process(info data.ShardValidatorInfoHandler) error - Commit() ([]byte, error) - IsInterfaceNil() bool -} - // ValidatorInfoCreator defines the methods to create a validator info type ValidatorInfoCreator interface { PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo @@ -161,9 +154,13 @@ type StakingDataProvider interface { GetTotalStakeEligibleNodes() *big.Int GetTotalTopUpStakeEligibleNodes() *big.Int GetNodeStakedTopUp(blsKey []byte) (*big.Int, error) - PrepareStakingDataForRewards(keys map[uint32][][]byte) error - FillValidatorInfo(blsKey []byte) error - ComputeUnQualifiedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) + PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler) error + FillValidatorInfo(validator state.ValidatorInfoHandler) error + ComputeUnQualifiedNodes(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) + GetBlsKeyOwner(blsKey []byte) (string, error) + GetNumOfValidatorsInCurrentEpoch() uint32 + GetCurrentEpochValidatorStats() ValidatorStatsInEpoch + GetOwnersData() map[string]*OwnerData Clean() IsInterfaceNil() bool } @@ -186,10 +183,10 @@ type EpochEconomicsDataProvider interface { // RewardsCreator defines the functionality for the metachain to create rewards at end of epoch type RewardsCreator interface { CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error GetProtocolSustainabilityRewards() *big.Int GetLocalTxCache() TransactionCacher @@ -214,3 +211,21 @@ type EpochStartNotifier interface { RegisterHandler(handler ActionHandler) IsInterfaceNil() bool } + +// MaxNodesChangeConfigProvider provides all config.MaxNodesChangeConfig, as well as +// the current config.MaxNodesChangeConfig based on the current epoch +type MaxNodesChangeConfigProvider interface { + GetAllNodesConfig() 
[]config.MaxNodesChangeConfig + GetCurrentNodesConfig() config.MaxNodesChangeConfig + EpochConfirmed(epoch uint32, round uint64) + IsInterfaceNil() bool +} + +// AuctionListSelector handles selection of nodes from auction list to be sent to waiting list, based on their top up +type AuctionListSelector interface { + SelectNodesFromAuctionList( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, + ) error + IsInterfaceNil() bool +} diff --git a/epochStart/metachain/auctionListDisplayer.go b/epochStart/metachain/auctionListDisplayer.go new file mode 100644 index 00000000000..d64a156a51c --- /dev/null +++ b/epochStart/metachain/auctionListDisplayer.go @@ -0,0 +1,232 @@ +package metachain + +import ( + "math/big" + "strconv" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/config" + errorsCommon "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/state" + logger "github.com/multiversx/mx-chain-logger-go" +) + +const maxPubKeyDisplayableLen = 20 +const maxNumOfDecimalsToDisplay = 5 + +type auctionListDisplayer struct { + softAuctionConfig *auctionConfig + tableDisplayer TableDisplayHandler + validatorPubKeyConverter core.PubkeyConverter + addressPubKeyConverter core.PubkeyConverter +} + +// ArgsAuctionListDisplayer is a struct placeholder for arguments needed to create an auction list displayer +type ArgsAuctionListDisplayer struct { + TableDisplayHandler TableDisplayHandler + ValidatorPubKeyConverter core.PubkeyConverter + AddressPubKeyConverter core.PubkeyConverter + AuctionConfig config.SoftAuctionConfig + Denomination int +} + +// NewAuctionListDisplayer creates an auction list data displayer, useful for debugging purposes during selection process +func NewAuctionListDisplayer(args ArgsAuctionListDisplayer) (*auctionListDisplayer, error) { + softAuctionConfig, err := getAuctionConfig(args.AuctionConfig, args.Denomination) + if err != nil { + return nil, err + } + + err = checkDisplayerNilArgs(args) + if err != nil { + return nil, err + } + + return &auctionListDisplayer{ + softAuctionConfig: softAuctionConfig, + tableDisplayer: args.TableDisplayHandler, + validatorPubKeyConverter: args.ValidatorPubKeyConverter, + addressPubKeyConverter: args.AddressPubKeyConverter, + }, nil +} + +func checkDisplayerNilArgs(args ArgsAuctionListDisplayer) error { + if check.IfNil(args.TableDisplayHandler) { + return errNilTableDisplayHandler + } + if check.IfNil(args.ValidatorPubKeyConverter) { + return errorsCommon.ErrNilValidatorPublicKeyConverter + } + if check.IfNil(args.AddressPubKeyConverter) { + return errorsCommon.ErrNilAddressPublicKeyConverter + } + + return nil +} + +// DisplayOwnersData will display initial owners data for auction selection +func (ald *auctionListDisplayer) DisplayOwnersData(ownersData map[string]*OwnerAuctionData) { + if log.GetLevel() > logger.LogDebug { + return + } + + tableHeader := []string{ + "Owner", + "Num staked nodes", + "Num active nodes", + "Num auction nodes", + "Total top up", + "Top up per node", + "Auction list nodes", + } + + lines := make([]*display.LineData, 0, len(ownersData)) + for ownerPubKey, owner := range ownersData { + line := []string{ + ald.addressPubKeyConverter.SilentEncode([]byte(ownerPubKey), log), + 
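+			// the remaining columns follow the tableHeader order defined above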
strconv.Itoa(int(owner.numStakedNodes)), + strconv.Itoa(int(owner.numActiveNodes)), + strconv.Itoa(int(owner.numAuctionNodes)), + getPrettyValue(owner.totalTopUp, ald.softAuctionConfig.denominator), + getPrettyValue(owner.topUpPerNode, ald.softAuctionConfig.denominator), + ald.getShortDisplayableBlsKeys(owner.auctionList), + } + lines = append(lines, display.NewLineData(false, line)) + } + + ald.tableDisplayer.DisplayTable(tableHeader, lines, "Initial nodes config in auction list") +} + +func getPrettyValue(val *big.Int, denominator *big.Int) string { + first := big.NewInt(0).Div(val, denominator).String() + decimals := big.NewInt(0).Mod(val, denominator).String() + + zeroesCt := (len(denominator.String()) - len(decimals)) - 1 + zeroesCt = core.MaxInt(zeroesCt, 0) + zeroes := strings.Repeat("0", zeroesCt) + + second := zeroes + decimals + if len(second) > maxNumOfDecimalsToDisplay { + second = second[:maxNumOfDecimalsToDisplay] + } + + return first + "." + second +} + +func (ald *auctionListDisplayer) getShortDisplayableBlsKeys(list []state.ValidatorInfoHandler) string { + pubKeys := "" + + for idx, validator := range list { + pubKeys += ald.getShortKey(validator.GetPublicKey()) + addDelimiter := idx != len(list)-1 + if addDelimiter { + pubKeys += ", " + } + } + + return pubKeys +} + +func (ald *auctionListDisplayer) getShortKey(pubKey []byte) string { + pubKeyHex := ald.validatorPubKeyConverter.SilentEncode(pubKey, log) + displayablePubKey := pubKeyHex + + pubKeyLen := len(displayablePubKey) + if pubKeyLen > maxPubKeyDisplayableLen { + displayablePubKey = pubKeyHex[:maxPubKeyDisplayableLen/2] + "..." + pubKeyHex[pubKeyLen-maxPubKeyDisplayableLen/2:] + } + + return displayablePubKey +} + +// DisplayOwnersSelectedNodes will display owners' selected nodes +func (ald *auctionListDisplayer) DisplayOwnersSelectedNodes(ownersData map[string]*OwnerAuctionData) { + if log.GetLevel() > logger.LogDebug { + return + } + + tableHeader := []string{ + "Owner", + "Num staked nodes", + "TopUp per node", + "Total top up", + "Num auction nodes", + "Num qualified auction nodes", + "Num active nodes", + "Qualified top up per node", + "Selected auction list nodes", + } + + lines := make([]*display.LineData, 0, len(ownersData)) + for ownerPubKey, owner := range ownersData { + line := []string{ + ald.addressPubKeyConverter.SilentEncode([]byte(ownerPubKey), log), + strconv.Itoa(int(owner.numStakedNodes)), + getPrettyValue(owner.topUpPerNode, ald.softAuctionConfig.denominator), + getPrettyValue(owner.totalTopUp, ald.softAuctionConfig.denominator), + strconv.Itoa(int(owner.numAuctionNodes)), + strconv.Itoa(int(owner.numQualifiedAuctionNodes)), + strconv.Itoa(int(owner.numActiveNodes)), + getPrettyValue(owner.qualifiedTopUpPerNode, ald.softAuctionConfig.denominator), + ald.getShortDisplayableBlsKeys(owner.auctionList[:owner.numQualifiedAuctionNodes]), + } + lines = append(lines, display.NewLineData(false, line)) + } + + ald.tableDisplayer.DisplayTable(tableHeader, lines, "Selected nodes config from auction list") +} + +// DisplayAuctionList will display the final selected auction nodes +func (ald *auctionListDisplayer) DisplayAuctionList( + auctionList []state.ValidatorInfoHandler, + ownersData map[string]*OwnerAuctionData, + numOfSelectedNodes uint32, +) { + if log.GetLevel() > logger.LogDebug { + return + } + + tableHeader := []string{"Owner", "Registered key", "Qualified TopUp per node"} + lines := make([]*display.LineData, 0, len(auctionList)) + blsKeysOwnerMap := getBlsKeyOwnerMap(ownersData) + for idx, 
validator := range auctionList { + pubKey := validator.GetPublicKey() + pubKeyEncoded := ald.validatorPubKeyConverter.SilentEncode(pubKey, log) + owner, found := blsKeysOwnerMap[string(pubKey)] + if !found { + log.Error("auctionListSelector.displayAuctionList could not find owner for", + "bls key", pubKeyEncoded) + continue + } + + qualifiedTopUp := ownersData[owner].qualifiedTopUpPerNode + horizontalLine := uint32(idx) == numOfSelectedNodes-1 + line := display.NewLineData(horizontalLine, []string{ + ald.addressPubKeyConverter.SilentEncode([]byte(owner), log), + pubKeyEncoded, + getPrettyValue(qualifiedTopUp, ald.softAuctionConfig.denominator), + }) + lines = append(lines, line) + } + + ald.tableDisplayer.DisplayTable(tableHeader, lines, "Final selected nodes from auction list") +} + +func getBlsKeyOwnerMap(ownersData map[string]*OwnerAuctionData) map[string]string { + ret := make(map[string]string) + for ownerPubKey, owner := range ownersData { + for _, blsKey := range owner.auctionList { + ret[string(blsKey.GetPublicKey())] = ownerPubKey + } + } + + return ret +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ald *auctionListDisplayer) IsInterfaceNil() bool { + return ald == nil +} diff --git a/epochStart/metachain/auctionListDisplayer_test.go b/epochStart/metachain/auctionListDisplayer_test.go new file mode 100644 index 00000000000..68d74e08e41 --- /dev/null +++ b/epochStart/metachain/auctionListDisplayer_test.go @@ -0,0 +1,288 @@ +package metachain + +import ( + "math" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/require" +) + +func createDisplayerArgs() ArgsAuctionListDisplayer { + return ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: createSoftAuctionConfig(), + Denomination: 0, + } +} + +func TestNewAuctionListDisplayer(t *testing.T) { + t.Parallel() + + t.Run("invalid auction config", func(t *testing.T) { + args := createDisplayerArgs() + args.AuctionConfig.MaxNumberOfIterations = 0 + ald, err := NewAuctionListDisplayer(args) + require.Nil(t, ald) + requireInvalidValueError(t, err, "for max number of iterations") + }) + + t.Run("should work", func(t *testing.T) { + args := createDisplayerArgs() + ald, err := NewAuctionListDisplayer(args) + require.Nil(t, err) + require.False(t, ald.IsInterfaceNil()) + }) +} + +func TestAuctionListDisplayer_DisplayOwnersData(t *testing.T) { + _ = logger.SetLogLevel("*:DEBUG") + defer func() { + _ = logger.SetLogLevel("*:INFO") + }() + + owner := []byte("owner") + validator := &state.ValidatorInfo{PublicKey: []byte("pubKey")} + wasDisplayCalled := false + + args := createDisplayerArgs() + args.AddressPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, owner, pkBytes) + return "ownerEncoded" + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, validator.PublicKey, pkBytes) + return "pubKeyEncoded" + }, + } + 
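+	// the table displayer mock captures the rendered header, rows and title so the test can assert on them below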
args.TableDisplayHandler = &testscommon.TableDisplayerMock{ + DisplayTableCalled: func(tableHeader []string, lines []*display.LineData, message string) { + require.Equal(t, []string{ + "Owner", + "Num staked nodes", + "Num active nodes", + "Num auction nodes", + "Total top up", + "Top up per node", + "Auction list nodes", + }, tableHeader) + require.Equal(t, "Initial nodes config in auction list", message) + require.Equal(t, []*display.LineData{ + { + Values: []string{"ownerEncoded", "4", "4", "1", "100.0", "25.0", "pubKeyEncoded"}, + HorizontalRuleAfter: false, + }, + }, lines) + + wasDisplayCalled = true + }, + } + ald, _ := NewAuctionListDisplayer(args) + + ownersData := map[string]*OwnerAuctionData{ + "owner": { + numStakedNodes: 4, + numActiveNodes: 4, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 4, + totalTopUp: big.NewInt(100), + topUpPerNode: big.NewInt(25), + qualifiedTopUpPerNode: big.NewInt(15), + auctionList: []state.ValidatorInfoHandler{&state.ValidatorInfo{PublicKey: []byte("pubKey")}}, + }, + } + + ald.DisplayOwnersData(ownersData) + require.True(t, wasDisplayCalled) +} + +func TestAuctionListDisplayer_DisplayOwnersSelectedNodes(t *testing.T) { + _ = logger.SetLogLevel("*:DEBUG") + defer func() { + _ = logger.SetLogLevel("*:INFO") + }() + + owner := []byte("owner") + validator := &state.ValidatorInfo{PublicKey: []byte("pubKey")} + wasDisplayCalled := false + + args := createDisplayerArgs() + args.AddressPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, owner, pkBytes) + return "ownerEncoded" + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, validator.PublicKey, pkBytes) + return "pubKeyEncoded" + }, + } + args.TableDisplayHandler = &testscommon.TableDisplayerMock{ + DisplayTableCalled: func(tableHeader []string, lines []*display.LineData, message string) { + require.Equal(t, []string{ + "Owner", + "Num staked nodes", + "TopUp per node", + "Total top up", + "Num auction nodes", + "Num qualified auction nodes", + "Num active nodes", + "Qualified top up per node", + "Selected auction list nodes", + }, tableHeader) + require.Equal(t, "Selected nodes config from auction list", message) + require.Equal(t, []*display.LineData{ + { + Values: []string{"ownerEncoded", "4", "25.0", "100.0", "1", "1", "4", "15.0", "pubKeyEncoded"}, + HorizontalRuleAfter: false, + }, + }, lines) + + wasDisplayCalled = true + }, + } + ald, _ := NewAuctionListDisplayer(args) + + ownersData := map[string]*OwnerAuctionData{ + "owner": { + numStakedNodes: 4, + numActiveNodes: 4, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + totalTopUp: big.NewInt(100), + topUpPerNode: big.NewInt(25), + qualifiedTopUpPerNode: big.NewInt(15), + auctionList: []state.ValidatorInfoHandler{&state.ValidatorInfo{PublicKey: []byte("pubKey")}}, + }, + } + + ald.DisplayOwnersSelectedNodes(ownersData) + require.True(t, wasDisplayCalled) +} + +func TestAuctionListDisplayer_DisplayAuctionList(t *testing.T) { + _ = logger.SetLogLevel("*:DEBUG") + defer func() { + _ = logger.SetLogLevel("*:INFO") + }() + + owner := []byte("owner") + validator := &state.ValidatorInfo{PublicKey: []byte("pubKey")} + wasDisplayCalled := false + + args := createDisplayerArgs() + args.AddressPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, owner, pkBytes) + 
return "ownerEncoded" + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, validator.PublicKey, pkBytes) + return "pubKeyEncoded" + }, + } + args.TableDisplayHandler = &testscommon.TableDisplayerMock{ + DisplayTableCalled: func(tableHeader []string, lines []*display.LineData, message string) { + require.Equal(t, []string{ + "Owner", + "Registered key", + "Qualified TopUp per node", + }, tableHeader) + require.Equal(t, "Final selected nodes from auction list", message) + require.Equal(t, []*display.LineData{ + { + Values: []string{"ownerEncoded", "pubKeyEncoded", "15.0"}, + HorizontalRuleAfter: true, + }, + }, lines) + + wasDisplayCalled = true + }, + } + ald, _ := NewAuctionListDisplayer(args) + + auctionList := []state.ValidatorInfoHandler{&state.ValidatorInfo{PublicKey: []byte("pubKey")}} + ownersData := map[string]*OwnerAuctionData{ + "owner": { + numStakedNodes: 4, + numActiveNodes: 4, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + totalTopUp: big.NewInt(100), + topUpPerNode: big.NewInt(25), + qualifiedTopUpPerNode: big.NewInt(15), + auctionList: auctionList, + }, + } + + ald.DisplayAuctionList(auctionList, ownersData, 1) + require.True(t, wasDisplayCalled) +} + +func TestGetPrettyValue(t *testing.T) { + t.Parallel() + + require.Equal(t, "1234.0", getPrettyValue(big.NewInt(1234), big.NewInt(1))) + require.Equal(t, "123.4", getPrettyValue(big.NewInt(1234), big.NewInt(10))) + require.Equal(t, "12.34", getPrettyValue(big.NewInt(1234), big.NewInt(100))) + require.Equal(t, "1.234", getPrettyValue(big.NewInt(1234), big.NewInt(1000))) + require.Equal(t, "0.1234", getPrettyValue(big.NewInt(1234), big.NewInt(10000))) + require.Equal(t, "0.01234", getPrettyValue(big.NewInt(1234), big.NewInt(100000))) + require.Equal(t, "0.00123", getPrettyValue(big.NewInt(1234), big.NewInt(1000000))) + require.Equal(t, "0.00012", getPrettyValue(big.NewInt(1234), big.NewInt(10000000))) + require.Equal(t, "0.00001", getPrettyValue(big.NewInt(1234), big.NewInt(100000000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1234), big.NewInt(1000000000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1234), big.NewInt(10000000000))) + + require.Equal(t, "1.0", getPrettyValue(big.NewInt(1), big.NewInt(1))) + require.Equal(t, "0.1", getPrettyValue(big.NewInt(1), big.NewInt(10))) + require.Equal(t, "0.01", getPrettyValue(big.NewInt(1), big.NewInt(100))) + require.Equal(t, "0.001", getPrettyValue(big.NewInt(1), big.NewInt(1000))) + require.Equal(t, "0.0001", getPrettyValue(big.NewInt(1), big.NewInt(10000))) + require.Equal(t, "0.00001", getPrettyValue(big.NewInt(1), big.NewInt(100000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1), big.NewInt(1000000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1), big.NewInt(10000000))) + + oneEGLD := big.NewInt(1000000000000000000) + denominationEGLD := big.NewInt(int64(math.Pow10(18))) + + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(0), denominationEGLD)) + require.Equal(t, "1.00000", getPrettyValue(oneEGLD, denominationEGLD)) + require.Equal(t, "1.10000", getPrettyValue(big.NewInt(1100000000000000000), denominationEGLD)) + require.Equal(t, "1.10000", getPrettyValue(big.NewInt(1100000000000000001), denominationEGLD)) + require.Equal(t, "1.11000", getPrettyValue(big.NewInt(1110000000000000001), denominationEGLD)) + require.Equal(t, "0.11100", getPrettyValue(big.NewInt(111000000000000001), denominationEGLD)) + 
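+	// decimals beyond maxNumOfDecimalsToDisplay are truncated, not rounded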
require.Equal(t, "0.01110", getPrettyValue(big.NewInt(11100000000000001), denominationEGLD)) + require.Equal(t, "0.00111", getPrettyValue(big.NewInt(1110000000000001), denominationEGLD)) + require.Equal(t, "0.00011", getPrettyValue(big.NewInt(111000000000001), denominationEGLD)) + require.Equal(t, "0.00001", getPrettyValue(big.NewInt(11100000000001), denominationEGLD)) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1110000000001), denominationEGLD)) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(111000000001), denominationEGLD)) + + require.Equal(t, "2.00000", getPrettyValue(big.NewInt(0).Mul(oneEGLD, big.NewInt(2)), denominationEGLD)) + require.Equal(t, "20.00000", getPrettyValue(big.NewInt(0).Mul(oneEGLD, big.NewInt(20)), denominationEGLD)) + require.Equal(t, "2000000.00000", getPrettyValue(big.NewInt(0).Mul(oneEGLD, big.NewInt(2000000)), denominationEGLD)) + + require.Equal(t, "3.22220", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(2222200000000000000)), denominationEGLD)) + require.Equal(t, "1.22222", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(222220000000000000)), denominationEGLD)) + require.Equal(t, "1.02222", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(22222000000000000)), denominationEGLD)) + require.Equal(t, "1.00222", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(2222200000000000)), denominationEGLD)) + require.Equal(t, "1.00022", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(222220000000000)), denominationEGLD)) + require.Equal(t, "1.00002", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(22222000000000)), denominationEGLD)) + require.Equal(t, "1.00000", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(2222200000000)), denominationEGLD)) + require.Equal(t, "1.00000", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(222220000000)), denominationEGLD)) +} diff --git a/epochStart/metachain/auctionListSelector.go b/epochStart/metachain/auctionListSelector.go new file mode 100644 index 00000000000..96c65e4a579 --- /dev/null +++ b/epochStart/metachain/auctionListSelector.go @@ -0,0 +1,476 @@ +package metachain + +import ( + "fmt" + "math/big" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" +) + +// OwnerAuctionData holds necessary auction data for an owner +type OwnerAuctionData struct { + numStakedNodes int64 + numActiveNodes int64 + numAuctionNodes int64 + numQualifiedAuctionNodes int64 + totalTopUp *big.Int + topUpPerNode *big.Int + qualifiedTopUpPerNode *big.Int + auctionList []state.ValidatorInfoHandler +} + +type auctionConfig struct { + step *big.Int + minTopUp *big.Int + maxTopUp *big.Int + denominator *big.Int + maxNumberOfIterations uint64 +} + +type auctionListSelector struct { + shardCoordinator sharding.Coordinator + stakingDataProvider epochStart.StakingDataProvider + nodesConfigProvider epochStart.MaxNodesChangeConfigProvider + auctionListDisplayer AuctionListDisplayHandler + softAuctionConfig *auctionConfig +} + +// AuctionListSelectorArgs is a struct placeholder for all arguments required to create an auctionListSelector +type 
AuctionListSelectorArgs struct { + ShardCoordinator sharding.Coordinator + StakingDataProvider epochStart.StakingDataProvider + MaxNodesChangeConfigProvider epochStart.MaxNodesChangeConfigProvider + AuctionListDisplayHandler AuctionListDisplayHandler + SoftAuctionConfig config.SoftAuctionConfig + Denomination int +} + +// NewAuctionListSelector will create a new auctionListSelector, which handles selection of nodes from auction list based +// on their top up +func NewAuctionListSelector(args AuctionListSelectorArgs) (*auctionListSelector, error) { + softAuctionConfig, err := getAuctionConfig(args.SoftAuctionConfig, args.Denomination) + if err != nil { + return nil, err + } + err = checkNilArgs(args) + if err != nil { + return nil, err + } + + log.Debug("NewAuctionListSelector with config", + "top up step", softAuctionConfig.step.String(), + "min top up", softAuctionConfig.minTopUp.String(), + "max top up", softAuctionConfig.maxTopUp.String(), + "denomination", args.Denomination, + "denominator for pretty values", softAuctionConfig.denominator.String(), + ) + + return &auctionListSelector{ + shardCoordinator: args.ShardCoordinator, + stakingDataProvider: args.StakingDataProvider, + nodesConfigProvider: args.MaxNodesChangeConfigProvider, + auctionListDisplayer: args.AuctionListDisplayHandler, + softAuctionConfig: softAuctionConfig, + }, nil +} + +func getAuctionConfig(softAuctionConfig config.SoftAuctionConfig, denomination int) (*auctionConfig, error) { + step, ok := big.NewInt(0).SetString(softAuctionConfig.TopUpStep, 10) + if !ok || step.Cmp(zero) <= 0 { + return nil, fmt.Errorf("%w for step in soft auction config;expected number > 0, got %s", + process.ErrInvalidValue, + softAuctionConfig.TopUpStep, + ) + } + + minTopUp, ok := big.NewInt(0).SetString(softAuctionConfig.MinTopUp, 10) + if !ok || minTopUp.Cmp(zero) <= 0 { + return nil, fmt.Errorf("%w for min top up in soft auction config;expected number > 0, got %s", + process.ErrInvalidValue, + softAuctionConfig.MinTopUp, + ) + } + + maxTopUp, ok := big.NewInt(0).SetString(softAuctionConfig.MaxTopUp, 10) + if !ok || maxTopUp.Cmp(zero) <= 0 { + return nil, fmt.Errorf("%w for max top up in soft auction config;expected number > 0, got %s", + process.ErrInvalidValue, + softAuctionConfig.MaxTopUp, + ) + } + + if minTopUp.Cmp(maxTopUp) > 0 { + return nil, fmt.Errorf("%w for min/max top up in soft auction config; min value: %s > max value: %s", + process.ErrInvalidValue, + softAuctionConfig.MinTopUp, + softAuctionConfig.MaxTopUp, + ) + } + + if denomination < 0 { + return nil, fmt.Errorf("%w for denomination in soft auction config;expected number >= 0, got %d", + process.ErrInvalidValue, + denomination, + ) + } + + if softAuctionConfig.MaxNumberOfIterations == 0 { + return nil, fmt.Errorf("%w for max number of iterations in soft auction config;expected value > 0", + process.ErrInvalidValue, + ) + } + + denominationStr := "1" + strings.Repeat("0", denomination) + denominator, ok := big.NewInt(0).SetString(denominationStr, 10) + if !ok { + return nil, fmt.Errorf("%w for denomination: %d", + errCannotComputeDenominator, + denomination, + ) + } + + if minTopUp.Cmp(denominator) < 0 { + return nil, fmt.Errorf("%w for min top up in auction config; expected value to be >= %s, got %s", + process.ErrInvalidValue, + denominator.String(), + minTopUp.String(), + ) + } + + if step.Cmp(denominator) < 0 { + return nil, fmt.Errorf("%w for step in auction config; expected value to be >= %s, got %s", + process.ErrInvalidValue, + denominator.String(), + 
step.String(), + ) + } + + return &auctionConfig{ + step: step, + minTopUp: minTopUp, + maxTopUp: maxTopUp, + denominator: denominator, + maxNumberOfIterations: softAuctionConfig.MaxNumberOfIterations, + }, nil +} + +func checkNilArgs(args AuctionListSelectorArgs) error { + if check.IfNil(args.ShardCoordinator) { + return epochStart.ErrNilShardCoordinator + } + if check.IfNil(args.StakingDataProvider) { + return epochStart.ErrNilStakingDataProvider + } + if check.IfNil(args.MaxNodesChangeConfigProvider) { + return epochStart.ErrNilMaxNodesChangeConfigProvider + } + if check.IfNil(args.AuctionListDisplayHandler) { + return errNilAuctionListDisplayHandler + } + + return nil +} + +// SelectNodesFromAuctionList will select nodes from validatorsInfoMap based on their top up. If two or more validators +// have the same top-up, then sorting will be done based on blsKey XOR randomness. Selected nodes will have their list set +// to common.SelectNodesFromAuctionList +func (als *auctionListSelector) SelectNodesFromAuctionList( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, +) error { + if len(randomness) == 0 { + return process.ErrNilRandSeed + } + + ownersData, auctionListSize := als.getAuctionData() + if auctionListSize == 0 { + log.Info("auctionListSelector.SelectNodesFromAuctionList: empty auction list; skip selection") + return nil + } + + currNodesConfig := als.nodesConfigProvider.GetCurrentNodesConfig() + currNumOfValidators := als.stakingDataProvider.GetNumOfValidatorsInCurrentEpoch() + numOfShuffledNodes, numForcedToStay := als.computeNumShuffledNodes(currNodesConfig) + numOfValidatorsAfterShuffling, err := safeSub(currNumOfValidators, numOfShuffledNodes) + if err != nil { + log.Warn(fmt.Sprintf("auctionListSelector.SelectNodesFromAuctionList: %v when trying to compute numOfValidatorsAfterShuffling = %v - %v (currNumOfValidators - numOfShuffledNodes)", + err, + currNumOfValidators, + numOfShuffledNodes, + )) + numOfValidatorsAfterShuffling = 0 + } + + maxNumNodes := currNodesConfig.MaxNumNodes + numValidatorsAfterShufflingWithForcedToStay := numOfValidatorsAfterShuffling + numForcedToStay + availableSlots, err := safeSub(maxNumNodes, numValidatorsAfterShufflingWithForcedToStay) + if availableSlots == 0 || err != nil { + log.Info(fmt.Sprintf("auctionListSelector.SelectNodesFromAuctionList: %v or zero value when trying to compute availableSlots = %v - %v (maxNodes - numOfValidatorsAfterShuffling+numForcedToStay); skip selecting nodes from auction list", + err, + maxNumNodes, + numValidatorsAfterShufflingWithForcedToStay, + )) + return nil + } + + log.Info("auctionListSelector.SelectNodesFromAuctionList", + "max nodes", maxNumNodes, + "current number of validators", currNumOfValidators, + "num of nodes which will be shuffled out", numOfShuffledNodes, + "num forced to stay", numForcedToStay, + "num of validators after shuffling with forced to stay", numValidatorsAfterShufflingWithForcedToStay, + "auction list size", auctionListSize, + fmt.Sprintf("available slots (%v - %v)", maxNumNodes, numValidatorsAfterShufflingWithForcedToStay), availableSlots, + ) + + als.auctionListDisplayer.DisplayOwnersData(ownersData) + numOfAvailableNodeSlots := core.MinUint32(auctionListSize, availableSlots) + + sw := core.NewStopWatch() + sw.Start("auctionListSelector.sortAuctionList") + defer func() { + sw.Stop("auctionListSelector.sortAuctionList") + log.Debug("time measurements", sw.GetMeasurements()...) 
+ }() + + return als.sortAuctionList(ownersData, numOfAvailableNodeSlots, validatorsInfoMap, randomness) +} + +func (als *auctionListSelector) getAuctionData() (map[string]*OwnerAuctionData, uint32) { + ownersData := make(map[string]*OwnerAuctionData) + numOfNodesInAuction := uint32(0) + + for owner, ownerData := range als.stakingDataProvider.GetOwnersData() { + if ownerData.Qualified && len(ownerData.AuctionList) > 0 { + numAuctionNodes := len(ownerData.AuctionList) + + ownersData[owner] = &OwnerAuctionData{ + numActiveNodes: ownerData.NumActiveNodes, + numAuctionNodes: int64(numAuctionNodes), + numQualifiedAuctionNodes: int64(numAuctionNodes), + numStakedNodes: ownerData.NumStakedNodes, + totalTopUp: ownerData.TotalTopUp, + topUpPerNode: ownerData.TopUpPerNode, + qualifiedTopUpPerNode: ownerData.TopUpPerNode, + auctionList: make([]state.ValidatorInfoHandler, numAuctionNodes), + } + copy(ownersData[owner].auctionList, ownerData.AuctionList) + numOfNodesInAuction += uint32(numAuctionNodes) + } + } + + return ownersData, numOfNodesInAuction +} + +func isInAuction(validator state.ValidatorInfoHandler) bool { + return validator.GetList() == string(common.AuctionList) +} + +func (als *auctionListSelector) computeNumShuffledNodes(currNodesConfig config.MaxNodesChangeConfig) (uint32, uint32) { + numNodesToShufflePerShard := currNodesConfig.NodesToShufflePerShard + numTotalToShuffleOut := numNodesToShufflePerShard * (als.shardCoordinator.NumberOfShards() + 1) + epochStats := als.stakingDataProvider.GetCurrentEpochValidatorStats() + + actuallyNumLeaving := uint32(0) + forcedToStay := uint32(0) + + for shardID := uint32(0); shardID < als.shardCoordinator.NumberOfShards(); shardID++ { + leavingInShard, forcedToStayInShard := computeActuallyNumLeaving(shardID, epochStats, numNodesToShufflePerShard) + actuallyNumLeaving += leavingInShard + forcedToStay += forcedToStayInShard + } + + leavingInMeta, forcedToStayInMeta := computeActuallyNumLeaving(core.MetachainShardId, epochStats, numNodesToShufflePerShard) + actuallyNumLeaving += leavingInMeta + forcedToStay += forcedToStayInMeta + + finalShuffledOut, err := safeSub(numTotalToShuffleOut, actuallyNumLeaving) + if err != nil { + log.Error("auctionListSelector.computeNumShuffledNodes error computing finalShuffledOut, returning default values", + "error", err, "numTotalToShuffleOut", numTotalToShuffleOut, "actuallyNumLeaving", actuallyNumLeaving) + return numTotalToShuffleOut, 0 + } + + return finalShuffledOut, forcedToStay +} + +func computeActuallyNumLeaving(shardID uint32, epochStats epochStart.ValidatorStatsInEpoch, numNodesToShuffledPerShard uint32) (uint32, uint32) { + numLeavingInShard := uint32(epochStats.Leaving[shardID]) + numActiveInShard := uint32(epochStats.Waiting[shardID] + epochStats.Eligible[shardID]) + + log.Debug("auctionListSelector.computeActuallyNumLeaving computing", + "shardID", shardID, "numLeavingInShard", numLeavingInShard, "numActiveInShard", numActiveInShard) + + actuallyLeaving := uint32(0) + forcedToStay := uint32(0) + if numLeavingInShard <= numNodesToShuffledPerShard && numActiveInShard > numLeavingInShard { + actuallyLeaving = numLeavingInShard + } + + if numLeavingInShard > numNodesToShuffledPerShard { + actuallyLeaving = numNodesToShuffledPerShard + forcedToStay = numLeavingInShard - numNodesToShuffledPerShard + } + + log.Debug("auctionListSelector.computeActuallyNumLeaving computed", + "actuallyLeaving", actuallyLeaving, "forcedToStay", forcedToStay) + + return actuallyLeaving, forcedToStay +} + +// TODO: Move this in 
elrond-go-core +func safeSub(a, b uint32) (uint32, error) { + if a < b { + return 0, epochStart.ErrUint32SubtractionOverflow + } + return a - b, nil +} + +func (als *auctionListSelector) sortAuctionList( + ownersData map[string]*OwnerAuctionData, + numOfAvailableNodeSlots uint32, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, +) error { + softAuctionNodesConfig := als.calcSoftAuctionNodesConfig(ownersData, numOfAvailableNodeSlots) + selectedNodes := als.selectNodes(softAuctionNodesConfig, numOfAvailableNodeSlots, randomness) + return markAuctionNodesAsSelected(selectedNodes, validatorsInfoMap) +} + +func (als *auctionListSelector) calcSoftAuctionNodesConfig( + data map[string]*OwnerAuctionData, + numAvailableSlots uint32, +) map[string]*OwnerAuctionData { + ownersData := copyOwnersData(data) + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + log.Debug("auctionListSelector: calc min and max possible top up", + "min top up per node", getPrettyValue(minTopUp, als.softAuctionConfig.denominator), + "max top up per node", getPrettyValue(maxTopUp, als.softAuctionConfig.denominator), + ) + + topUp := big.NewInt(0).SetBytes(minTopUp.Bytes()) + previousConfig := copyOwnersData(ownersData) + iterationNumber := uint64(0) + maxNumberOfIterationsReached := false + + for ; topUp.Cmp(maxTopUp) < 0 && !maxNumberOfIterationsReached; topUp.Add(topUp, als.softAuctionConfig.step) { + previousConfig = copyOwnersData(ownersData) + numNodesQualifyingForTopUp := calcNodesConfig(ownersData, topUp) + + if numNodesQualifyingForTopUp < int64(numAvailableSlots) { + break + } + + iterationNumber++ + maxNumberOfIterationsReached = iterationNumber >= als.softAuctionConfig.maxNumberOfIterations + } + + log.Debug("auctionListSelector: found min required", + "topUp", getPrettyValue(topUp, als.softAuctionConfig.denominator), + "after num of iterations", iterationNumber, + ) + return previousConfig +} + +func (als *auctionListSelector) getMinMaxPossibleTopUp(ownersData map[string]*OwnerAuctionData) (*big.Int, *big.Int) { + min := big.NewInt(0).SetBytes(als.softAuctionConfig.maxTopUp.Bytes()) + max := big.NewInt(0).SetBytes(als.softAuctionConfig.minTopUp.Bytes()) + + for _, owner := range ownersData { + if owner.topUpPerNode.Cmp(min) < 0 { + min = big.NewInt(0).SetBytes(owner.topUpPerNode.Bytes()) + } + + ownerNumNodesWithOnlyOneAuctionNode := big.NewInt(owner.numActiveNodes + 1) + maxPossibleTopUpForOwner := big.NewInt(0).Div(owner.totalTopUp, ownerNumNodesWithOnlyOneAuctionNode) + if maxPossibleTopUpForOwner.Cmp(max) > 0 { + max = big.NewInt(0).SetBytes(maxPossibleTopUpForOwner.Bytes()) + } + } + + if min.Cmp(als.softAuctionConfig.minTopUp) < 0 { + min = als.softAuctionConfig.minTopUp + } + + return min, max +} + +func copyOwnersData(ownersData map[string]*OwnerAuctionData) map[string]*OwnerAuctionData { + ret := make(map[string]*OwnerAuctionData) + for owner, data := range ownersData { + ret[owner] = &OwnerAuctionData{ + numActiveNodes: data.numActiveNodes, + numAuctionNodes: data.numAuctionNodes, + numQualifiedAuctionNodes: data.numQualifiedAuctionNodes, + numStakedNodes: data.numStakedNodes, + totalTopUp: data.totalTopUp, + topUpPerNode: data.topUpPerNode, + qualifiedTopUpPerNode: data.qualifiedTopUpPerNode, + auctionList: make([]state.ValidatorInfoHandler, len(data.auctionList)), + } + copy(ret[owner].auctionList, data.auctionList) + } + + return ret +} + +func calcNodesConfig(ownersData map[string]*OwnerAuctionData, topUp *big.Int) int64 { + numNodesQualifyingForTopUp := 
int64(0) + + for ownerPubKey, owner := range ownersData { + activeNodes := big.NewInt(owner.numActiveNodes) + topUpActiveNodes := big.NewInt(0).Mul(topUp, activeNodes) + validatorTopUpForAuction := big.NewInt(0).Sub(owner.totalTopUp, topUpActiveNodes) + if validatorTopUpForAuction.Cmp(topUp) < 0 { + delete(ownersData, ownerPubKey) + continue + } + + qualifiedNodesBigInt := big.NewInt(0).Div(validatorTopUpForAuction, topUp) + qualifiedNodes := qualifiedNodesBigInt.Int64() + isNumQualifiedNodesOverflow := !qualifiedNodesBigInt.IsUint64() + + if qualifiedNodes > owner.numAuctionNodes || isNumQualifiedNodesOverflow { + numNodesQualifyingForTopUp += owner.numAuctionNodes + } else { + numNodesQualifyingForTopUp += qualifiedNodes + owner.numQualifiedAuctionNodes = qualifiedNodes + + ownerRemainingNodes := big.NewInt(owner.numActiveNodes + owner.numQualifiedAuctionNodes) + owner.qualifiedTopUpPerNode = big.NewInt(0).Div(owner.totalTopUp, ownerRemainingNodes) + } + } + + return numNodesQualifyingForTopUp +} + +func markAuctionNodesAsSelected( + selectedNodes []state.ValidatorInfoHandler, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, +) error { + for _, node := range selectedNodes { + newNode := node.ShallowClone() + newNode.SetPreviousList(node.GetList()) + newNode.SetList(string(common.SelectedFromAuctionList)) + + err := validatorsInfoMap.Replace(node, newNode) + if err != nil { + return err + } + } + + return nil +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (als *auctionListSelector) IsInterfaceNil() bool { + return als == nil +} diff --git a/epochStart/metachain/auctionListSelector_test.go b/epochStart/metachain/auctionListSelector_test.go new file mode 100644 index 00000000000..25cced015fc --- /dev/null +++ b/epochStart/metachain/auctionListSelector_test.go @@ -0,0 +1,895 @@ +package metachain + +import ( + "math/big" + "strings" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/stretchr/testify/require" +) + +func createSoftAuctionConfig() config.SoftAuctionConfig { + return config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } +} + +func createAuctionListSelectorArgs(maxNodesChangeConfig []config.MaxNodesChangeConfig) AuctionListSelectorArgs { + epochNotifier := forking.NewGenericEpochNotifier() + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(epochNotifier, maxNodesChangeConfig) + + argsStakingDataProvider := createStakingDataProviderArgs() + stakingSCProvider, _ := NewStakingDataProvider(argsStakingDataProvider) + shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) + + softAuctionCfg := createSoftAuctionConfig() + auctionDisplayer, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), 
+ ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: softAuctionCfg, + }) + return AuctionListSelectorArgs{ + ShardCoordinator: shardCoordinator, + StakingDataProvider: stakingSCProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + AuctionListDisplayHandler: auctionDisplayer, + SoftAuctionConfig: softAuctionCfg, + } +} + +func createFullAuctionListSelectorArgs(maxNodesChangeConfig []config.MaxNodesChangeConfig) (AuctionListSelectorArgs, ArgsNewEpochStartSystemSCProcessing) { + epochNotifier := forking.NewGenericEpochNotifier() + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(epochNotifier, maxNodesChangeConfig) + + argsSystemSC, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + argsSystemSC.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ + EpochField: stakingV4Step2EnableEpoch, + }) + argsSystemSC.MaxNodesChangeConfigProvider = nodesConfigProvider + + softAuctionCfg := createSoftAuctionConfig() + auctionDisplayer, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: softAuctionCfg, + }) + return AuctionListSelectorArgs{ + ShardCoordinator: argsSystemSC.ShardCoordinator, + StakingDataProvider: argsSystemSC.StakingDataProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + AuctionListDisplayHandler: auctionDisplayer, + SoftAuctionConfig: softAuctionCfg, + }, argsSystemSC +} + +func fillValidatorsInfo(t *testing.T, validatorsMap state.ShardValidatorsInfoMapHandler, sdp epochStart.StakingDataProvider) { + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + err := sdp.FillValidatorInfo(validator) + require.Nil(t, err) + } +} + +func TestNewAuctionListSelector(t *testing.T) { + t.Parallel() + + t.Run("nil shard coordinator", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.ShardCoordinator = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, epochStart.ErrNilShardCoordinator, err) + }) + + t.Run("nil staking data provider", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.StakingDataProvider = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, epochStart.ErrNilStakingDataProvider, err) + }) + + t.Run("nil max nodes change config provider", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.MaxNodesChangeConfigProvider = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, epochStart.ErrNilMaxNodesChangeConfigProvider, err) + }) + + t.Run("nil auction list displayer", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.AuctionListDisplayHandler = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, errNilAuctionListDisplayHandler, err) + }) + + t.Run("invalid soft auction config", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.SoftAuctionConfig.TopUpStep = "0" + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + requireInvalidValueError(t, err, "step") + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + als, err := 
NewAuctionListSelector(args) + require.NotNil(t, als) + require.Nil(t, err) + require.False(t, als.IsInterfaceNil()) + }) +} + +func requireInvalidValueError(t *testing.T, err error, msgToContain string) { + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), process.ErrInvalidValue.Error())) + require.True(t, strings.Contains(err.Error(), msgToContain)) +} + +func TestGetAuctionConfig(t *testing.T) { + t.Parallel() + + t.Run("invalid step", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + cfg.TopUpStep = "dsa" + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "step") + + cfg.TopUpStep = "-1" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "step") + + cfg.TopUpStep = "0" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "step") + }) + + t.Run("invalid min top up", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + cfg.MinTopUp = "dsa" + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min top up") + + cfg.MinTopUp = "-1" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min top up") + + cfg.MinTopUp = "0" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min top up") + }) + + t.Run("invalid max top up", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + cfg.MaxTopUp = "dsa" + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "max top up") + + cfg.MaxTopUp = "-1" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "max top up") + + cfg.MaxTopUp = "0" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "max top up") + }) + + t.Run("invalid denomination", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + res, err := getAuctionConfig(cfg, -1) + require.Nil(t, res) + requireInvalidValueError(t, err, "denomination") + }) + + t.Run("zero max number of iterations", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + cfg.MaxNumberOfIterations = 0 + + res, err := getAuctionConfig(cfg, 10) + require.Nil(t, res) + requireInvalidValueError(t, err, "for max number of iterations in soft auction config") + }) + + t.Run("min top up > max top up", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "32", + MaxTopUp: "16", + MaxNumberOfIterations: 1, + } + + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min value: 32 > max value: 16") + }) + + t.Run("min top up < denominator", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "100", + MinTopUp: "10", + MaxTopUp: "5000", + MaxNumberOfIterations: 1, + } + + res, err := getAuctionConfig(cfg, 2) + require.Nil(t, res) + requireInvalidValueError(t, err, "for min top up in auction config; expected value to be >= 100, got 10") + }) + + t.Run("step < denominator", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "100", + MaxTopUp: "5000", + MaxNumberOfIterations: 1, + } + + res, err := getAuctionConfig(cfg, 2) + require.Nil(t, res) + requireInvalidValueError(t, err, "for step in auction config; expected value to be >= 100, got 10") + 
}) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "444", + MaxNumberOfIterations: 100000, + } + + res, err := getAuctionConfig(cfg, 0) + require.Nil(t, err) + require.Equal(t, &auctionConfig{ + step: big.NewInt(10), + minTopUp: big.NewInt(1), + maxTopUp: big.NewInt(444), + denominator: big.NewInt(1), + maxNumberOfIterations: 100000, + }, res) + + minTopUp, _ := big.NewInt(0).SetString("1000000000000000000", 10) + maxTopUp, _ := big.NewInt(0).SetString("32000000000000000000000000", 10) + step, _ := big.NewInt(0).SetString("10000000000000000000", 10) + cfg = config.SoftAuctionConfig{ + TopUpStep: step.String(), + MinTopUp: minTopUp.String(), + MaxTopUp: maxTopUp.String(), + MaxNumberOfIterations: 100000, + } + + res, err = getAuctionConfig(cfg, 18) + require.Nil(t, err) + require.Equal(t, &auctionConfig{ + step: step, + minTopUp: minTopUp, + maxTopUp: maxTopUp, + denominator: minTopUp, + maxNumberOfIterations: 100000, + }, res) + }) +} + +func TestAuctionListSelector_SelectNodesFromAuction(t *testing.T) { + t.Parallel() + + t.Run("nil randomness, expect error", func(t *testing.T) { + t.Parallel() + + args := createAuctionListSelectorArgs(nil) + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(state.NewShardValidatorsInfoMap(), nil) + require.Equal(t, process.ErrNilRandSeed, err) + }) + + t.Run("empty auction list", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 2}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(state.NewShardValidatorsInfoMap(), []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) + + t.Run("not enough available slots to select auction nodes", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + owner2StakedKeys := [][]byte{[]byte("pubKey1")} + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[0], common.AuctionList, "", 0, owner2)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 1}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner2, owner2, owner2StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := 
als.SelectNodesFromAuctionList(validatorsInfo, []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner2StakedKeys[0], common.AuctionList, "", 0, owner2), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) + + t.Run("one eligible + one auction, max num nodes = 1, number of nodes after shuffling = 0, expect node in auction is selected", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + owner2StakedKeys := [][]byte{[]byte("pubKey1")} + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[0], common.AuctionList, "", 0, owner2)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 1, NodesToShufflePerShard: 1}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner2, owner2, owner2StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(validatorsInfo, []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner2StakedKeys[0], common.SelectedFromAuctionList, common.AuctionList, 0, owner2), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) + + t.Run("two available slots for auction nodes, but only one node in auction", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.AuctionList, "", 0, owner1)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 2}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(validatorsInfo, []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.SelectedFromAuctionList, common.AuctionList, 0, owner1), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) +} + +func TestAuctionListSelector_calcSoftAuctionNodesConfigEdgeCases(t *testing.T) { + t.Parallel() + + randomness := []byte("pk0") + args := createAuctionListSelectorArgs(nil) + als, _ := NewAuctionListSelector(args) + + t.Run("two validators, both have zero top up", func(t *testing.T) { + t.Parallel() + + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + owner1 := "owner1" + 
owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, als.softAuctionConfig.minTopUp, minTopUp) + require.Equal(t, als.softAuctionConfig.minTopUp, maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 2) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2}, selectedNodes) + }) + + t.Run("one validator with zero top up, one with min top up, one with top up", func(t *testing.T) { + t.Parallel() + + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + v3 := &state.ValidatorInfo{PublicKey: []byte("pk3")} + + owner1 := "owner1" + owner2 := "owner2" + owner3 := "owner3" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1), + topUpPerNode: big.NewInt(1), + qualifiedTopUpPerNode: big.NewInt(1), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + owner3: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v3}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(1), minTopUp) + require.Equal(t, big.NewInt(1000), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 3) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v3, v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 2) + expectedSoftAuctionConfig := copyOwnersData(softAuctionConfig) + delete(expectedSoftAuctionConfig, owner1) + require.Equal(t, expectedSoftAuctionConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v3, v2}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + delete(expectedSoftAuctionConfig, owner2) + require.Equal(t, expectedSoftAuctionConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + 
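+		// with a single available slot, only owner3 (highest top up) stays qualified, so its node v3 is selected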
require.Equal(t, []state.ValidatorInfoHandler{v3}, selectedNodes) + }) + + t.Run("two validators, both have same top up", func(t *testing.T) { + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(1000), minTopUp) + require.Equal(t, big.NewInt(1000), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 2) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2}, selectedNodes) + }) + + t.Run("two validators, top up difference less than step", func(t *testing.T) { + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(995), + topUpPerNode: big.NewInt(995), + qualifiedTopUpPerNode: big.NewInt(995), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(995), minTopUp) + require.Equal(t, big.NewInt(1000), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 2) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v1, v2}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v1}, selectedNodes) + }) + + t.Run("three validators, top up difference equal to step", func(t *testing.T) { + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + v0 := &state.ValidatorInfo{PublicKey: []byte("pk0")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + 
topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 2, + totalTopUp: big.NewInt(1980), + topUpPerNode: big.NewInt(990), + qualifiedTopUpPerNode: big.NewInt(990), + auctionList: []state.ValidatorInfoHandler{v2, v0}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(990), minTopUp) + require.Equal(t, big.NewInt(1980), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 3) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v1, v2, v0}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 2) + expectedSoftAuction := copyOwnersData(ownersData) + expectedSoftAuction[owner2].numQualifiedAuctionNodes = 1 + expectedSoftAuction[owner2].qualifiedTopUpPerNode = big.NewInt(1980) + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + delete(expectedSoftAuction, owner1) + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2}, selectedNodes) + }) + + t.Run("large top up difference, would qualify more nodes than an owner has, expect correct computation", func(t *testing.T) { + argsLargeTopUp := createAuctionListSelectorArgs(nil) + argsLargeTopUp.SoftAuctionConfig = config.SoftAuctionConfig{ + TopUpStep: "10000000000000000000", // 10 eGLD + MinTopUp: "1000000000000000000", // 1 eGLD + MaxTopUp: "32000000000000000000000000", // 32 mil eGLD + MaxNumberOfIterations: 10, + } + argsLargeTopUp.Denomination = 18 + selector, _ := NewAuctionListSelector(argsLargeTopUp) + + v0 := &state.ValidatorInfo{PublicKey: []byte("pk0")} + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + oneEGLD, _ := big.NewInt(0).SetString("1000000000000000000", 10) + owner1TopUp, _ := big.NewInt(0).SetString("32000000000000000000000000", 10) // 32 mil eGLD + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: owner1TopUp, + topUpPerNode: owner1TopUp, + qualifiedTopUpPerNode: owner1TopUp, + auctionList: []state.ValidatorInfoHandler{v0}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 2, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v1, v2}, + }, + } + + minTopUp, maxTopUp := selector.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, oneEGLD, minTopUp) + require.Equal(t, owner1TopUp, maxTopUp) + + softAuctionConfig := selector.calcSoftAuctionNodesConfig(ownersData, 3) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := selector.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v0, v2, v1}, selectedNodes) + + softAuctionConfig = selector.calcSoftAuctionNodesConfig(ownersData, 2) +
expectedSoftAuction := copyOwnersData(ownersData) + expectedSoftAuction[owner1].numQualifiedAuctionNodes = 1 + expectedSoftAuction[owner1].qualifiedTopUpPerNode = owner1TopUp + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = selector.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v0, v2}, selectedNodes) + + softAuctionConfig = selector.calcSoftAuctionNodesConfig(ownersData, 1) + delete(expectedSoftAuction, owner2) + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = selector.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v0}, selectedNodes) + }) +} + +func TestAuctionListSelector_calcSoftAuctionNodesConfig(t *testing.T) { + t.Parallel() + + randomness := []byte("pk0") + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + v3 := &state.ValidatorInfo{PublicKey: []byte("pk3")} + v4 := &state.ValidatorInfo{PublicKey: []byte("pk4")} + v5 := &state.ValidatorInfo{PublicKey: []byte("pk5")} + v6 := &state.ValidatorInfo{PublicKey: []byte("pk6")} + v7 := &state.ValidatorInfo{PublicKey: []byte("pk7")} + v8 := &state.ValidatorInfo{PublicKey: []byte("pk8")} + + owner1 := "owner1" + owner2 := "owner2" + owner3 := "owner3" + owner4 := "owner4" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 2, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 4, + totalTopUp: big.NewInt(1500), + topUpPerNode: big.NewInt(375), + qualifiedTopUpPerNode: big.NewInt(375), + auctionList: []state.ValidatorInfoHandler{v1, v2}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 3, + numQualifiedAuctionNodes: 3, + numStakedNodes: 3, + totalTopUp: big.NewInt(3000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v3, v4, v5}, + }, + owner3: { + numActiveNodes: 1, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 3, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(333), + qualifiedTopUpPerNode: big.NewInt(333), + auctionList: []state.ValidatorInfoHandler{v6, v7}, + }, + owner4: { + numActiveNodes: 1, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 2, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v8}, + }, + } + + args := createAuctionListSelectorArgs(nil) + als, _ := NewAuctionListSelector(args) + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(1), minTopUp) // owner4 having all nodes in auction + require.Equal(t, big.NewInt(3000), maxTopUp) // owner2 having only one node in auction + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 9) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 8, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v2, v1, v7, v6, v8}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 8) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 8, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v2, v1, v7, v6, v8}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 7) + expectedConfig := copyOwnersData(ownersData) + delete(expectedConfig, owner4) + require.Equal(t,
expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 7, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v2, v1, v7, v6}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 6) + expectedConfig[owner3].numQualifiedAuctionNodes = 1 + expectedConfig[owner3].qualifiedTopUpPerNode = big.NewInt(500) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 6, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v7, v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 5) + expectedConfig[owner1].numQualifiedAuctionNodes = 1 + expectedConfig[owner1].qualifiedTopUpPerNode = big.NewInt(500) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 5, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v7, v2}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 4) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 4, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v7}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 3) + delete(expectedConfig, owner3) + delete(expectedConfig, owner1) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 2) + expectedConfig[owner2].numQualifiedAuctionNodes = 2 + expectedConfig[owner2].qualifiedTopUpPerNode = big.NewInt(1500) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + expectedConfig[owner2].numQualifiedAuctionNodes = 1 + expectedConfig[owner2].qualifiedTopUpPerNode = big.NewInt(3000) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5}, selectedNodes) +} diff --git a/epochStart/metachain/auctionListSorting.go b/epochStart/metachain/auctionListSorting.go new file mode 100644 index 00000000000..4759ec65bcb --- /dev/null +++ b/epochStart/metachain/auctionListSorting.go @@ -0,0 +1,104 @@ +package metachain + +import ( + "bytes" + "math/big" + "sort" + + "github.com/multiversx/mx-chain-go/state" +) + +func (als *auctionListSelector) selectNodes( + ownersData map[string]*OwnerAuctionData, + numAvailableSlots uint32, + randomness []byte, +) []state.ValidatorInfoHandler { + selectedFromAuction := make([]state.ValidatorInfoHandler, 0) + validatorTopUpMap := make(map[string]*big.Int) + + pubKeyLen := getPubKeyLen(ownersData) + normRand := calcNormalizedRandomness(randomness, pubKeyLen) + + for _, owner := range ownersData { + sortListByPubKey(owner.auctionList) + addQualifiedValidatorsTopUpInMap(owner, validatorTopUpMap) + selectedFromAuction = append(selectedFromAuction, owner.auctionList[:owner.numQualifiedAuctionNodes]...) 
+ } + + als.auctionListDisplayer.DisplayOwnersSelectedNodes(ownersData) + sortValidators(selectedFromAuction, validatorTopUpMap, normRand) + als.auctionListDisplayer.DisplayAuctionList(selectedFromAuction, ownersData, numAvailableSlots) + + return selectedFromAuction[:numAvailableSlots] +} + +func getPubKeyLen(ownersData map[string]*OwnerAuctionData) int { + for _, owner := range ownersData { + return len(owner.auctionList[0].GetPublicKey()) + } + + return 0 +} + +func calcNormalizedRandomness(randomness []byte, expectedLen int) []byte { + rand := randomness + randLen := len(rand) + + if expectedLen > randLen { + repeatedCt := expectedLen/randLen + 1 + rand = bytes.Repeat(randomness, repeatedCt) + } + + rand = rand[:expectedLen] + return rand +} + +func sortListByPubKey(list []state.ValidatorInfoHandler) { + sort.SliceStable(list, func(i, j int) bool { + pubKey1 := list[i].GetPublicKey() + pubKey2 := list[j].GetPublicKey() + + return bytes.Compare(pubKey1, pubKey2) > 0 + }) +} + +func addQualifiedValidatorsTopUpInMap(owner *OwnerAuctionData, validatorTopUpMap map[string]*big.Int) { + for i := int64(0); i < owner.numQualifiedAuctionNodes; i++ { + validatorPubKey := string(owner.auctionList[i].GetPublicKey()) + validatorTopUpMap[validatorPubKey] = big.NewInt(0).SetBytes(owner.qualifiedTopUpPerNode.Bytes()) + } +} + +func sortValidators( + list []state.ValidatorInfoHandler, + validatorTopUpMap map[string]*big.Int, + randomness []byte, +) { + sort.SliceStable(list, func(i, j int) bool { + pubKey1 := list[i].GetPublicKey() + pubKey2 := list[j].GetPublicKey() + + nodeTopUpPubKey1 := validatorTopUpMap[string(pubKey1)] + nodeTopUpPubKey2 := validatorTopUpMap[string(pubKey2)] + + if nodeTopUpPubKey1.Cmp(nodeTopUpPubKey2) == 0 { + return compareByXORWithRandomness(pubKey1, pubKey2, randomness) + } + + return nodeTopUpPubKey1.Cmp(nodeTopUpPubKey2) > 0 + }) +} + +func compareByXORWithRandomness(pubKey1, pubKey2, randomness []byte) bool { + xorLen := len(randomness) + + key1Xor := make([]byte, xorLen) + key2Xor := make([]byte, xorLen) + + for idx := 0; idx < xorLen; idx++ { + key1Xor[idx] = pubKey1[idx] ^ randomness[idx] + key2Xor[idx] = pubKey2[idx] ^ randomness[idx] + } + + return bytes.Compare(key1Xor, key2Xor) == 1 +} diff --git a/epochStart/metachain/auctionListSorting_test.go b/epochStart/metachain/auctionListSorting_test.go new file mode 100644 index 00000000000..637869ea1d6 --- /dev/null +++ b/epochStart/metachain/auctionListSorting_test.go @@ -0,0 +1,39 @@ +package metachain + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCalcNormalizedRandomness(t *testing.T) { + t.Parallel() + + t.Run("randomness longer than expected len", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 2) + require.Equal(t, []byte("ra"), result) + }) + + t.Run("randomness length equal to expected len", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 4) + require.Equal(t, []byte("rand"), result) + }) + + t.Run("randomness length less than expected len", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 6) + require.Equal(t, []byte("randra"), result) + }) + + t.Run("expected len is zero", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 0) + require.Empty(t, result) + }) +} diff --git a/epochStart/metachain/common.go b/epochStart/metachain/common.go new file mode 100644 index 00000000000..9eb614772ab --- 
/dev/null +++ b/epochStart/metachain/common.go @@ -0,0 +1,16 @@ +package metachain + +import "github.com/multiversx/mx-chain-go/state" + +// GetAllNodeKeys returns all node public keys from the provided validators info map, grouped by shard ID +func GetAllNodeKeys(validatorsInfo state.ShardValidatorsInfoMapHandler) map[uint32][][]byte { + nodeKeys := make(map[uint32][][]byte) + for shardID, validatorsInfoSlice := range validatorsInfo.GetShardValidatorsInfoMap() { + nodeKeys[shardID] = make([][]byte, 0) + for _, validatorInfo := range validatorsInfoSlice { + nodeKeys[shardID] = append(nodeKeys[shardID], validatorInfo.GetPublicKey()) + } + } + + return nodeKeys +} diff --git a/epochStart/metachain/economicsDataProvider.go b/epochStart/metachain/economicsDataProvider.go index c39eb917521..ec165ffe80a 100644 --- a/epochStart/metachain/economicsDataProvider.go +++ b/epochStart/metachain/economicsDataProvider.go @@ -53,7 +53,7 @@ func (es *epochEconomicsStatistics) SetLeadersFees(fees *big.Int) { } // SetRewardsToBeDistributed sets the rewards to be distributed at the end of the epoch (includes the rewards per block, -//the block producers fees, protocol sustainability rewards and developer fees) +// the block producers fees, protocol sustainability rewards and developer fees) func (es *epochEconomicsStatistics) SetRewardsToBeDistributed(rewards *big.Int) { es.mutEconomicsStatistics.Lock() defer es.mutEconomicsStatistics.Unlock() @@ -99,7 +99,7 @@ func (es *epochEconomicsStatistics) LeaderFees() *big.Int { } // RewardsToBeDistributed returns the rewards to be distributed at the end of epoch (includes rewards for produced -//blocks, protocol sustainability rewards, block producer fees and developer fees) +// blocks, protocol sustainability rewards, block producer fees and developer fees) func (es *epochEconomicsStatistics) RewardsToBeDistributed() *big.Int { es.mutEconomicsStatistics.RLock() defer es.mutEconomicsStatistics.RUnlock() diff --git a/epochStart/metachain/epochStartData.go b/epochStart/metachain/epochStartData.go index 1c6bd30516e..1a67b3a3692 100644 --- a/epochStart/metachain/epochStartData.go +++ b/epochStart/metachain/epochStartData.go @@ -289,7 +289,7 @@ func (e *epochStartData) getShardDataFromEpochStartData( } epochStartIdentifier := core.EpochStartIdentifier(prevEpoch) - if prevEpoch == 0 { + if prevEpoch == e.genesisEpoch { return lastMetaHash, []byte(epochStartIdentifier), nil } diff --git a/epochStart/metachain/errors.go b/epochStart/metachain/errors.go new file mode 100644 index 00000000000..319bf83dafd --- /dev/null +++ b/epochStart/metachain/errors.go @@ -0,0 +1,11 @@ +package metachain + +import "errors" + +var errNilValidatorsInfoMap = errors.New("received nil shard validators info map") + +var errCannotComputeDenominator = errors.New("cannot compute denominator value") + +var errNilAuctionListDisplayHandler = errors.New("nil auction list display handler provided") + +var errNilTableDisplayHandler = errors.New("nil table display handler provided") diff --git a/epochStart/metachain/interface.go b/epochStart/metachain/interface.go new file mode 100644 index 00000000000..1e141fc079f --- /dev/null +++ b/epochStart/metachain/interface.go @@ -0,0 +1,24 @@ +package metachain + +import ( + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/state" +) + +// AuctionListDisplayHandler should be able to display auction list data during selection process +type AuctionListDisplayHandler interface { + DisplayOwnersData(ownersData
map[string]*OwnerAuctionData) + DisplayOwnersSelectedNodes(ownersData map[string]*OwnerAuctionData) + DisplayAuctionList( + auctionList []state.ValidatorInfoHandler, + ownersData map[string]*OwnerAuctionData, + numOfSelectedNodes uint32, + ) + IsInterfaceNil() bool +} + +// TableDisplayHandler should be able to display tables in log +type TableDisplayHandler interface { + DisplayTable(tableHeader []string, lines []*display.LineData, message string) + IsInterfaceNil() bool +} diff --git a/epochStart/metachain/legacySystemSCs.go b/epochStart/metachain/legacySystemSCs.go new file mode 100644 index 00000000000..0db6a39916f --- /dev/null +++ b/epochStart/metachain/legacySystemSCs.go @@ -0,0 +1,1382 @@ +package metachain + +import ( + "bytes" + "context" + "fmt" + "math" + "math/big" + "sort" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/marshal" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" + vInfo "github.com/multiversx/mx-chain-go/common/validatorInfo" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" +) + +type legacySystemSCProcessor struct { + systemVM vmcommon.VMExecutionHandler + userAccountsDB state.AccountsAdapter + marshalizer marshal.Marshalizer + peerAccountsDB state.AccountsAdapter + chanceComputer nodesCoordinator.ChanceComputer + shardCoordinator sharding.Coordinator + startRating uint32 + validatorInfoCreator epochStart.ValidatorInfoCreator + genesisNodesConfig sharding.GenesisNodesSetupHandler + nodesConfigProvider epochStart.NodesConfigProvider + stakingDataProvider epochStart.StakingDataProvider + maxNodesChangeConfigProvider epochStart.MaxNodesChangeConfigProvider + endOfEpochCallerAddress []byte + stakingSCAddress []byte + esdtOwnerAddressBytes []byte + mapNumSwitchedPerShard map[uint32]uint32 + mapNumSwitchablePerShard map[uint32]uint32 + maxNodes uint32 + + flagChangeMaxNodesEnabled atomic.Flag + enableEpochsHandler common.EnableEpochsHandler +} + +func newLegacySystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*legacySystemSCProcessor, error) { + err := checkLegacyArgs(args) + if err != nil { + return nil, err + } + + legacy := &legacySystemSCProcessor{ + systemVM: args.SystemVM, + userAccountsDB: args.UserAccountsDB, + peerAccountsDB: args.PeerAccountsDB, + marshalizer: args.Marshalizer, + startRating: args.StartRating, + validatorInfoCreator: args.ValidatorInfoCreator, + genesisNodesConfig: args.GenesisNodesConfig, + endOfEpochCallerAddress: args.EndOfEpochCallerAddress, + stakingSCAddress: args.StakingSCAddress, + chanceComputer: args.ChanceComputer, + mapNumSwitchedPerShard: make(map[uint32]uint32), + 
mapNumSwitchablePerShard: make(map[uint32]uint32), + stakingDataProvider: args.StakingDataProvider, + nodesConfigProvider: args.NodesConfigProvider, + shardCoordinator: args.ShardCoordinator, + esdtOwnerAddressBytes: args.ESDTOwnerAddressBytes, + maxNodesChangeConfigProvider: args.MaxNodesChangeConfigProvider, + enableEpochsHandler: args.EnableEpochsHandler, + } + + return legacy, nil +} + +func checkLegacyArgs(args ArgsNewEpochStartSystemSCProcessing) error { + if check.IfNilReflect(args.SystemVM) { + return epochStart.ErrNilSystemVM + } + if check.IfNil(args.UserAccountsDB) { + return epochStart.ErrNilAccountsDB + } + if check.IfNil(args.PeerAccountsDB) { + return epochStart.ErrNilAccountsDB + } + if check.IfNil(args.Marshalizer) { + return epochStart.ErrNilMarshalizer + } + if check.IfNil(args.ValidatorInfoCreator) { + return epochStart.ErrNilValidatorInfoProcessor + } + if len(args.EndOfEpochCallerAddress) == 0 { + return epochStart.ErrNilEndOfEpochCallerAddress + } + if len(args.StakingSCAddress) == 0 { + return epochStart.ErrNilStakingSCAddress + } + if check.IfNil(args.ChanceComputer) { + return epochStart.ErrNilChanceComputer + } + if check.IfNil(args.GenesisNodesConfig) { + return epochStart.ErrNilGenesisNodesConfig + } + if check.IfNil(args.NodesConfigProvider) { + return epochStart.ErrNilNodesConfigProvider + } + if check.IfNil(args.StakingDataProvider) { + return epochStart.ErrNilStakingDataProvider + } + if check.IfNil(args.ShardCoordinator) { + return epochStart.ErrNilShardCoordinator + } + if check.IfNil(args.MaxNodesChangeConfigProvider) { + return epochStart.ErrNilMaxNodesChangeConfigProvider + } + if check.IfNil(args.EnableEpochsHandler) { + return process.ErrNilEnableEpochsHandler + } + if len(args.ESDTOwnerAddressBytes) == 0 { + return epochStart.ErrEmptyESDTOwnerAddress + } + + return nil +} + +func (s *legacySystemSCProcessor) processLegacy( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + nonce uint64, + epoch uint32, +) error { + if s.enableEpochsHandler.IsFlagEnabled(common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly) { + err := s.updateSystemSCConfigMinNodes() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2OwnerFlagInSpecificEpochOnly) { + err := s.updateOwnersForBlsKeys() + if err != nil { + return err + } + } + + // the updateMaxNodes call needs the StakingV2Flag functionality to be enabled. 
Otherwise, the call will error + if s.flagChangeMaxNodesEnabled.IsSet() && s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + err := s.updateMaxNodes(validatorsInfoMap, nonce) + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlagInSpecificEpochOnly) { + err := s.resetLastUnJailed() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.DelegationSmartContractFlagInSpecificEpochOnly) { + err := s.initDelegationSystemSC() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.cleanAdditionalQueue() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.computeNumWaitingPerShard(validatorsInfoMap) + if err != nil { + return err + } + + err = s.swapJailedWithWaiting(validatorsInfoMap) + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.prepareStakingDataForEligibleNodes(validatorsInfoMap) + if err != nil { + return err + } + + err = s.fillStakingDataForNonEligible(validatorsInfoMap) + if err != nil { + return err + } + + numUnStaked, err := s.unStakeNodesWithNotEnoughFunds(validatorsInfoMap, epoch) + if err != nil { + return err + } + + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + err = s.stakeNodesFromQueue(validatorsInfoMap, numUnStaked, nonce, common.NewList) + if err != nil { + return err + } + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.ESDTFlagInSpecificEpochOnly) { + err := s.initESDT() + if err != nil { + // not a critical error + log.Error("error while initializing ESDT", "err", err) + } + } + + return nil +} + +// ToggleUnStakeUnBond will pause/unPause the unStake/unBond functions on the validator system sc +func (s *legacySystemSCProcessor) ToggleUnStakeUnBond(value bool) error { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + return nil + } + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: nil, + CallValue: big.NewInt(0), + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "unPauseUnStakeUnBond", + } + + if value { + vmInput.Function = "pauseUnStakeUnBond" + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrSystemValidatorSCCall + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) unStakeNodesWithNotEnoughFunds( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + epoch uint32, +) (uint32, error) { + nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorsInfoMap) + if err != nil { + return 0, err + } + + nodesUnStakedFromAdditionalQueue := uint32(0) + + log.Debug("unStake nodes with not enough funds", "num", len(nodesToUnStake)) + for _, blsKey := range nodesToUnStake { + log.Debug("unStake at end of epoch for node", "blsKey", blsKey) + err = s.unStakeOneNode(blsKey, epoch) + if err != nil { + return 0, err + } + + validatorInfo := validatorsInfoMap.GetValidator(blsKey) + if validatorInfo == nil { 
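+ // the key is not part of the validators info map, so it was still sitting in the additional staking queue rather than being an active/waiting validator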
+ nodesUnStakedFromAdditionalQueue++ + log.Debug("unStaked node which was in additional queue", "blsKey", blsKey) + continue + } + + stakingV4Enabled := s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) + validatorLeaving := validatorInfo.ShallowClone() + validatorLeaving.SetListAndIndex(string(common.LeavingList), validatorLeaving.GetIndex(), stakingV4Enabled) + s.replaceValidators(validatorInfo, validatorLeaving, validatorsInfoMap) + } + + err = s.updateDelegationContracts(mapOwnersKeys) + if err != nil { + return 0, err + } + + nodesToStakeFromQueue := uint32(len(nodesToUnStake)) + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + nodesToStakeFromQueue -= nodesUnStakedFromAdditionalQueue + } + + log.Debug("stake nodes from waiting list", "num", nodesToStakeFromQueue) + return nodesToStakeFromQueue, nil +} + +func (s *legacySystemSCProcessor) unStakeOneNode(blsKey []byte, epoch uint32) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "unStakeAtEndOfEpoch", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + if vmOutput.ReturnCode != vmcommon.Ok { + log.Debug("unStakeOneNode", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) + return epochStart.ErrUnStakeExecuteError + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + account, errExists := s.peerAccountsDB.GetExistingAccount(blsKey) + if errExists != nil { + return nil + } + + peerAccount, ok := account.(state.PeerAccountHandler) + if !ok { + return epochStart.ErrWrongTypeAssertion + } + + peerAccount.SetListAndIndex(peerAccount.GetShardId(), string(common.LeavingList), peerAccount.GetIndexInList(), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + peerAccount.SetUnStakedEpoch(epoch) + err = s.peerAccountsDB.SaveAccount(peerAccount) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) updateDelegationContracts(mapOwnerKeys map[string][][]byte) error { + sortedDelegationsSCs := make([]string, 0, len(mapOwnerKeys)) + for address := range mapOwnerKeys { + shardId := s.shardCoordinator.ComputeId([]byte(address)) + if shardId != core.MetachainShardId { + continue + } + sortedDelegationsSCs = append(sortedDelegationsSCs, address) + } + + sort.Slice(sortedDelegationsSCs, func(i, j int) bool { + return sortedDelegationsSCs[i] < sortedDelegationsSCs[j] + }) + + for _, address := range sortedDelegationsSCs { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: mapOwnerKeys[address], + CallValue: big.NewInt(0), + }, + RecipientAddr: []byte(address), + Function: "unStakeAtEndOfEpoch", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + if vmOutput.ReturnCode != vmcommon.Ok { + log.Debug("unStakeAtEndOfEpoch", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) + return epochStart.ErrUnStakeExecuteError + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) fillStakingDataForNonEligible(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + for shId, validatorsInfoSlice := range 
validatorsInfoMap.GetShardValidatorsInfoMap() { + newList := make([]state.ValidatorInfoHandler, 0, len(validatorsInfoSlice)) + deleteCalled := false + + for _, validatorInfo := range validatorsInfoSlice { + if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { + newList = append(newList, validatorInfo) + continue + } + + err := s.stakingDataProvider.FillValidatorInfo(validatorInfo) + if err != nil { + deleteCalled = true + + log.Error("fillStakingDataForNonEligible", "error", err) + if len(validatorInfo.GetList()) > 0 { + return err + } + + err = s.peerAccountsDB.RemoveAccount(validatorInfo.GetPublicKey()) + if err != nil { + log.Error("fillStakingDataForNonEligible removeAccount", "error", err) + } + + continue + } + + newList = append(newList, validatorInfo) + } + + if deleteCalled { + s.setValidatorsInShard(validatorsInfoMap, shId, newList) + } + } + + return nil +} + +func (s *legacySystemSCProcessor) setValidatorsInShard( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + shardID uint32, + validators []state.ValidatorInfoHandler, +) { + err := validatorsInfoMap.SetValidatorsInShard(shardID, validators) + if err == nil { + return + } + + // this should never happen, but replace them anyway, as in old legacy code + log.Error("legacySystemSCProcessor.setValidatorsInShard", "error", err) + validatorsInfoMap.SetValidatorsInShardUnsafe(shardID, validators) +} + +func (s *legacySystemSCProcessor) prepareStakingDataForEligibleNodes(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + eligibleNodes, err := getEligibleNodeKeys(validatorsInfoMap) + if err != nil { + return err + } + + return s.prepareStakingData(eligibleNodes) +} + +func (s *legacySystemSCProcessor) prepareStakingData(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + sw := core.NewStopWatch() + sw.Start("prepareStakingDataForRewards") + defer func() { + sw.Stop("prepareStakingDataForRewards") + log.Debug("systemSCProcessor.prepareStakingDataForRewards time measurements", sw.GetMeasurements()...) 
+ }() + + return s.stakingDataProvider.PrepareStakingData(validatorsInfoMap) +} + +func getEligibleNodeKeys( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, +) (state.ShardValidatorsInfoMapHandler, error) { + eligibleNodesKeys := state.NewShardValidatorsInfoMap() + for _, validatorInfo := range validatorsInfoMap.GetAllValidatorsInfo() { + if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { + err := eligibleNodesKeys.Add(validatorInfo.ShallowClone()) + if err != nil { + log.Error("getEligibleNodeKeys: could not add validator info in map", "error", err) + return nil, err + } + } + } + + return eligibleNodesKeys, nil +} + +// ProcessDelegationRewards will process the rewards which are directed towards the delegation system smart contracts +func (s *legacySystemSCProcessor) ProcessDelegationRewards( + miniBlocks block.MiniBlockSlice, + txCache epochStart.TransactionCacher, +) error { + if txCache == nil { + return epochStart.ErrNilLocalTxCache + } + + rwdMb := getRewardsMiniBlockForMeta(miniBlocks) + if rwdMb == nil { + return nil + } + + for _, txHash := range rwdMb.TxHashes { + rwdTx, err := txCache.GetTx(txHash) + if err != nil { + return err + } + + err = s.executeRewardTx(rwdTx) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) executeRewardTx(rwdTx data.TransactionHandler) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: nil, + CallValue: rwdTx.GetValue(), + }, + RecipientAddr: rwdTx.GetRcvAddr(), + Function: "updateRewards", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrSystemDelegationCall + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +// updates the configuration of the system SC if the flags permit +func (s *legacySystemSCProcessor) updateSystemSCConfigMinNodes() error { + minNumberOfNodesWithHysteresis := s.genesisNodesConfig.MinNumberOfNodesWithHysteresis() + err := s.setMinNumberOfNodes(minNumberOfNodesWithHysteresis) + + return err +} + +func (s *legacySystemSCProcessor) resetLastUnJailed() error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "resetLastUnJailedFromQueue", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrResetLastUnJailedFromQueue + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +// updates the configuration of the system SC if the flags permit +func (s *legacySystemSCProcessor) updateMaxNodes(validatorsInfoMap state.ShardValidatorsInfoMapHandler, nonce uint64) error { + sw := core.NewStopWatch() + sw.Start("total") + defer func() { + sw.Stop("total") + log.Debug("systemSCProcessor.updateMaxNodes", sw.GetMeasurements()...) 
+ }() + + maxNumberOfNodes := s.maxNodes + sw.Start("setMaxNumberOfNodes") + prevMaxNumberOfNodes, err := s.setMaxNumberOfNodes(maxNumberOfNodes) + sw.Stop("setMaxNumberOfNodes") + if err != nil { + return err + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return nil + } + + if maxNumberOfNodes < prevMaxNumberOfNodes { + return epochStart.ErrInvalidMaxNumberOfNodes + } + + sw.Start("stakeNodesFromQueue") + err = s.stakeNodesFromQueue(validatorsInfoMap, maxNumberOfNodes-prevMaxNumberOfNodes, nonce, common.NewList) + sw.Stop("stakeNodesFromQueue") + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) computeNumWaitingPerShard(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + for shardID, validatorInfoList := range validatorsInfoMap.GetShardValidatorsInfoMap() { + totalInWaiting := uint32(0) + for _, validatorInfo := range validatorInfoList { + switch validatorInfo.GetList() { + case string(common.WaitingList): + totalInWaiting++ + } + } + s.mapNumSwitchablePerShard[shardID] = totalInWaiting + s.mapNumSwitchedPerShard[shardID] = 0 + } + return nil +} + +func (s *legacySystemSCProcessor) swapJailedWithWaiting(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + jailedValidators := s.getSortedJailedNodes(validatorsInfoMap) + + log.Debug("number of jailed validators", "num", len(jailedValidators)) + + newValidators := make(map[string]struct{}) + for _, jailedValidator := range jailedValidators { + if _, ok := newValidators[string(jailedValidator.GetPublicKey())]; ok { + continue + } + if isValidator(jailedValidator) && s.mapNumSwitchablePerShard[jailedValidator.GetShardId()] <= s.mapNumSwitchedPerShard[jailedValidator.GetShardId()] { + log.Debug("cannot switch in this epoch anymore for this shard as switched num waiting", + "shardID", jailedValidator.GetShardId(), + "numSwitched", s.mapNumSwitchedPerShard[jailedValidator.GetShardId()]) + continue + } + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{jailedValidator.GetPublicKey()}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "switchJailedWithWaiting", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + log.Debug("switchJailedWithWaiting called for", + "key", jailedValidator.GetPublicKey(), + "returnMessage", vmOutput.ReturnMessage) + if vmOutput.ReturnCode != vmcommon.Ok { + continue + } + + newValidator, err := s.stakingToValidatorStatistics(validatorsInfoMap, jailedValidator, vmOutput) + if err != nil { + return err + } + + if len(newValidator) != 0 { + newValidators[string(newValidator)] = struct{}{} + } + } + + return nil +} + +func (s *legacySystemSCProcessor) stakingToValidatorStatistics( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + jailedValidator state.ValidatorInfoHandler, + vmOutput *vmcommon.VMOutput, +) ([]byte, error) { + stakingSCOutput, ok := vmOutput.OutputAccounts[string(s.stakingSCAddress)] + if !ok { + return nil, epochStart.ErrStakingSCOutputAccountNotFound + } + + var activeStorageUpdate *vmcommon.StorageUpdate + for _, storageUpdate := range stakingSCOutput.StorageUpdates { + isNewValidatorKey := len(storageUpdate.Offset) == len(jailedValidator.GetPublicKey()) && + !bytes.Equal(storageUpdate.Offset, jailedValidator.GetPublicKey()) + if isNewValidatorKey { + activeStorageUpdate = storageUpdate + break + } + } + if activeStorageUpdate == nil { 
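+ // no new BLS key appeared in the staking SC storage updates, so there is no waiting node available to switch with the jailed one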
+ log.Debug("no one in waiting suitable for switch") + if s.enableEpochsHandler.IsFlagEnabled(common.SaveJailedAlwaysFlag) { + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return nil, err + } + } + + return nil, nil + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return nil, err + } + + var stakingData systemSmartContracts.StakedDataV2_0 + err = s.marshalizer.Unmarshal(&stakingData, activeStorageUpdate.Data) + if err != nil { + return nil, err + } + + blsPubKey := activeStorageUpdate.Offset + log.Debug("staking validator key who switches with the jailed one", "blsKey", blsPubKey) + account, isNew, err := state.GetPeerAccountAndReturnIfNew(s.peerAccountsDB, blsPubKey) + if err != nil { + return nil, err + } + + if !bytes.Equal(account.GetRewardAddress(), stakingData.RewardAddress) { + err = account.SetRewardAddress(stakingData.RewardAddress) + if err != nil { + return nil, err + } + } + + if !isNew { + // the new validator is deleted from the staking queue, not the jailed validator + validatorsInfoMap.DeleteByKey(blsPubKey, account.GetShardId()) + } + + account.SetListAndIndex(jailedValidator.GetShardId(), string(common.NewList), uint32(stakingData.StakedNonce), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + account.SetTempRating(s.startRating) + account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) + + err = s.peerAccountsDB.SaveAccount(account) + if err != nil { + return nil, err + } + + jailedAccount, err := s.getPeerAccount(jailedValidator.GetPublicKey()) + if err != nil { + return nil, err + } + + jailedAccount.SetListAndIndex(jailedValidator.GetShardId(), string(common.JailedList), jailedValidator.GetIndex(), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + jailedAccount.ResetAtNewEpoch() + err = s.peerAccountsDB.SaveAccount(jailedAccount) + if err != nil { + return nil, err + } + + if isValidator(jailedValidator) { + s.mapNumSwitchedPerShard[jailedValidator.GetShardId()]++ + } + + newValidatorInfo := s.validatorInfoCreator.PeerAccountToValidatorInfo(account) + s.replaceValidators(jailedValidator, newValidatorInfo, validatorsInfoMap) + + return blsPubKey, nil +} + +func (s *legacySystemSCProcessor) replaceValidators( + old state.ValidatorInfoHandler, + new state.ValidatorInfoHandler, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, +) { + // legacy code + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + _ = validatorsInfoMap.ReplaceValidatorByKey(old.GetPublicKey(), new, old.GetShardId()) + return + } + + // try with new code which does extra validity checks. 
+ // if this also fails, do legacy code + if err := validatorsInfoMap.Replace(old, new); err != nil { + log.Error("legacySystemSCProcessor.replaceValidators", "error", err) + + replaced := validatorsInfoMap.ReplaceValidatorByKey(old.GetPublicKey(), new, old.GetShardId()) + log.Debug("legacySystemSCProcessor.replaceValidators", "old", old.GetPublicKey(), "new", new.GetPublicKey(), "was replace successful", replaced) + } +} + +func isValidator(validator state.ValidatorInfoHandler) bool { + return validator.GetList() == string(common.WaitingList) || validator.GetList() == string(common.EligibleList) +} + +func (s *legacySystemSCProcessor) getUserAccount(address []byte) (state.UserAccountHandler, error) { + acnt, err := s.userAccountsDB.LoadAccount(address) + if err != nil { + return nil, err + } + + stAcc, ok := acnt.(state.UserAccountHandler) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + + return stAcc, nil +} + +// save account changes in state from vmOutput - protected by VM - every output can be treated as is. +func (s *legacySystemSCProcessor) processSCOutputAccounts( + vmOutput *vmcommon.VMOutput, +) error { + + outputAccounts := process.SortVMOutputInsideData(vmOutput) + for _, outAcc := range outputAccounts { + acc, err := s.getUserAccount(outAcc.Address) + if err != nil { + return err + } + + storageUpdates := process.GetSortedStorageUpdates(outAcc) + for _, storeUpdate := range storageUpdates { + err = acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) + if err != nil { + return err + } + } + + if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { + err = acc.AddToBalance(outAcc.BalanceDelta) + if err != nil { + return err + } + } + + err = s.userAccountsDB.SaveAccount(acc) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) getSortedJailedNodes(validatorsInfoMap state.ShardValidatorsInfoMapHandler) []state.ValidatorInfoHandler { + newJailedValidators := make([]state.ValidatorInfoHandler, 0) + oldJailedValidators := make([]state.ValidatorInfoHandler, 0) + + minChance := s.chanceComputer.GetChance(0) + for _, validatorInfo := range validatorsInfoMap.GetAllValidatorsInfo() { + if validatorInfo.GetList() == string(common.JailedList) { + oldJailedValidators = append(oldJailedValidators, validatorInfo) + } else if s.chanceComputer.GetChance(validatorInfo.GetTempRating()) < minChance { + newJailedValidators = append(newJailedValidators, validatorInfo) + } + + } + + sort.Sort(validatorList(oldJailedValidators)) + sort.Sort(validatorList(newJailedValidators)) + + return append(oldJailedValidators, newJailedValidators...) 
+} + +func (s *legacySystemSCProcessor) getPeerAccount(key []byte) (state.PeerAccountHandler, error) { + account, err := s.peerAccountsDB.LoadAccount(key) + if err != nil { + return nil, err + } + + peerAcc, ok := account.(state.PeerAccountHandler) + if !ok { + return nil, epochStart.ErrWrongTypeAssertion + } + + return peerAcc, nil +} + +func (s *legacySystemSCProcessor) setMinNumberOfNodes(minNumNodes uint32) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{big.NewInt(int64(minNumNodes)).Bytes()}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "updateConfigMinNodes", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + log.Debug("setMinNumberOfNodes called with", + "minNumNodes", minNumNodes, + "returnMessage", vmOutput.ReturnMessage) + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrInvalidMinNumberOfNodes + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) setMaxNumberOfNodes(maxNumNodes uint32) (uint32, error) { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{big.NewInt(int64(maxNumNodes)).Bytes()}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "updateConfigMaxNodes", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return 0, err + } + + log.Debug("setMaxNumberOfNodes called with", + "maxNumNodes", maxNumNodes, + "current maxNumNodes in legacySystemSCProcessor", s.maxNodes, + "returnMessage", vmOutput.ReturnMessage) + + if vmOutput.ReturnCode != vmcommon.Ok { + return 0, epochStart.ErrInvalidMaxNumberOfNodes + } + if len(vmOutput.ReturnData) != 1 { + return 0, epochStart.ErrInvalidSystemSCReturn + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return 0, err + } + + prevMaxNumNodes := big.NewInt(0).SetBytes(vmOutput.ReturnData[0]).Uint64() + return uint32(prevMaxNumNodes), nil +} + +func (s *legacySystemSCProcessor) updateOwnersForBlsKeys() error { + sw := core.NewStopWatch() + sw.Start("systemSCProcessor") + defer func() { + sw.Stop("systemSCProcessor") + log.Debug("systemSCProcessor.updateOwnersForBlsKeys time measurements", sw.GetMeasurements()...) 
+ }() + + sw.Start("getValidatorSystemAccount") + userValidatorAccount, err := s.getValidatorSystemAccount() + sw.Stop("getValidatorSystemAccount") + if err != nil { + return err + } + + sw.Start("getArgumentsForSetOwnerFunctionality") + arguments, err := s.getArgumentsForSetOwnerFunctionality(userValidatorAccount) + sw.Stop("getArgumentsForSetOwnerFunctionality") + if err != nil { + return err + } + + sw.Start("callSetOwnersOnAddresses") + err = s.callSetOwnersOnAddresses(arguments) + sw.Stop("callSetOwnersOnAddresses") + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) getValidatorSystemAccount() (state.UserAccountHandler, error) { + validatorAccount, err := s.userAccountsDB.LoadAccount(vm.ValidatorSCAddress) + if err != nil { + return nil, fmt.Errorf("%w when loading validator account", err) + } + + userValidatorAccount, ok := validatorAccount.(state.UserAccountHandler) + if !ok { + return nil, fmt.Errorf("%w when loading validator account", epochStart.ErrWrongTypeAssertion) + } + + if check.IfNil(userValidatorAccount.DataTrie()) { + return nil, epochStart.ErrNilDataTrie + } + + return userValidatorAccount, nil +} + +func (s *legacySystemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAccount state.UserAccountHandler) ([][]byte, error) { + arguments := make([][]byte, 0) + + leavesChannels := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), + ErrChan: errChan.NewErrChanWrapper(), + } + err := userValidatorAccount.GetAllLeaves(leavesChannels, context.Background()) + if err != nil { + return nil, err + } + for leaf := range leavesChannels.LeavesChan { + validatorData := &systemSmartContracts.ValidatorDataV2{} + + err = s.marshalizer.Unmarshal(validatorData, leaf.Value()) + if err != nil { + continue + } + for _, blsKey := range validatorData.BlsPubKeys { + arguments = append(arguments, blsKey) + arguments = append(arguments, leaf.Key()) + } + } + + err = leavesChannels.ErrChan.ReadFromChanNonBlocking() + if err != nil { + return nil, err + } + + return arguments, nil +} + +func (s *legacySystemSCProcessor) callSetOwnersOnAddresses(arguments [][]byte) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: arguments, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "setOwnersOnAddresses", + } + + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when calling setOwnersOnAddresses function", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s when calling setOwnersOnAddresses", vmOutput.ReturnCode) + } + + return s.processSCOutputAccounts(vmOutput) +} + +func (s *legacySystemSCProcessor) initDelegationSystemSC() error { + codeMetaData := &vmcommon.CodeMetadata{ + Upgradeable: false, + Payable: false, + Readable: true, + } + + vmInput := &vmcommon.ContractCreateInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.DelegationManagerSCAddress, + Arguments: [][]byte{}, + CallValue: big.NewInt(0), + }, + ContractCode: vm.DelegationManagerSCAddress, + ContractCodeMetadata: codeMetaData.ToBytes(), + } + + vmOutput, err := s.systemVM.RunSmartContractCreate(vmInput) + if err != nil { + return err + } + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrCouldNotInitDelegationSystemSC + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + 
err = s.updateSystemSCContractsCode(vmInput.ContractCodeMetadata) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) updateSystemSCContractsCode(contractMetadata []byte) error { + contractsToUpdate := make([][]byte, 0) + contractsToUpdate = append(contractsToUpdate, vm.StakingSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.ValidatorSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.GovernanceSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.ESDTSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.DelegationManagerSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.FirstDelegationSCAddress) + + for _, address := range contractsToUpdate { + userAcc, err := s.getUserAccount(address) + if err != nil { + return err + } + + userAcc.SetOwnerAddress(address) + userAcc.SetCodeMetadata(contractMetadata) + userAcc.SetCode(address) + + err = s.userAccountsDB.SaveAccount(userAcc) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) cleanAdditionalQueue() error { + sw := core.NewStopWatch() + sw.Start("systemSCProcessor") + defer func() { + sw.Stop("systemSCProcessor") + log.Debug("systemSCProcessor.cleanAdditionalQueue time measurements", sw.GetMeasurements()...) + }() + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{}, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "cleanAdditionalQueue", + } + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when cleaning additional queue", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s, return message %s when cleaning additional queue", vmOutput.ReturnCode, vmOutput.ReturnMessage) + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + // returnData format is list(address - all blsKeys which were unstaked for that) + addressLength := len(s.endOfEpochCallerAddress) + mapOwnersKeys := make(map[string][][]byte) + currentOwner := "" + for _, returnData := range vmOutput.ReturnData { + if len(returnData) == addressLength { + currentOwner = string(returnData) + continue + } + + if len(currentOwner) != addressLength { + continue + } + + mapOwnersKeys[currentOwner] = append(mapOwnersKeys[currentOwner], returnData) + } + + err = s.updateDelegationContracts(mapOwnersKeys) + if err != nil { + log.Error("update delegation contracts failed after cleaning additional queue", "error", err.Error()) + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) stakeNodesFromQueue( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + nodesToStake uint32, + nonce uint64, + list common.PeerType, +) error { + if nodesToStake == 0 { + return nil + } + + nodesToStakeAsBigInt := big.NewInt(0).SetUint64(uint64(nodesToStake)) + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{nodesToStakeAsBigInt.Bytes()}, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "stakeNodesFromQueue", + } + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when staking nodes from waiting list", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s when staking nodes from waiting list", vmOutput.ReturnCode) + } + 
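+ // returnData is a flat list of (blsKey, rewardAddress) pairs, so its length must be even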
if len(vmOutput.ReturnData)%2 != 0 { + return fmt.Errorf("%w return data must be divisible by 2 when staking nodes from waiting list", epochStart.ErrInvalidSystemSCReturn) + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + err = s.addNewlyStakedNodesToValidatorTrie(validatorsInfoMap, vmOutput.ReturnData, nonce, list) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) addNewlyStakedNodesToValidatorTrie( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + returnData [][]byte, + nonce uint64, + list common.PeerType, +) error { + for i := 0; i < len(returnData); i += 2 { + blsKey := returnData[i] + rewardAddress := returnData[i+1] + + peerAcc, err := s.getPeerAccount(blsKey) + if err != nil { + return err + } + + err = peerAcc.SetRewardAddress(rewardAddress) + if err != nil { + return err + } + + peerAcc.SetListAndIndex(peerAcc.GetShardId(), string(list), uint32(nonce), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + peerAcc.SetTempRating(s.startRating) + peerAcc.SetUnStakedEpoch(common.DefaultUnstakedEpoch) + + err = s.peerAccountsDB.SaveAccount(peerAcc) + if err != nil { + return err + } + + validatorInfo := &state.ValidatorInfo{ + PublicKey: blsKey, + ShardId: peerAcc.GetShardId(), + List: string(list), + Index: uint32(nonce), + TempRating: s.startRating, + Rating: s.startRating, + RewardAddress: rewardAddress, + AccumulatedFees: big.NewInt(0), + } + + err = s.addNewValidator(validatorsInfoMap, validatorInfo) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) addNewValidator( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + validatorInfo state.ValidatorInfoHandler, +) error { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return validatorsInfoMap.Add(validatorInfo) + } + + existingValidator := validatorsInfoMap.GetValidator(validatorInfo.GetPublicKey()) + if !check.IfNil(existingValidator) { + err := validatorsInfoMap.Delete(existingValidator) + if err != nil { + return err + } + } + + return validatorsInfoMap.Add(validatorInfo) +} + +func (s *legacySystemSCProcessor) initESDT() error { + currentConfigValues, err := s.extractConfigFromESDTContract() + if err != nil { + return err + } + + return s.changeESDTOwner(currentConfigValues) +} + +func (s *legacySystemSCProcessor) extractConfigFromESDTContract() ([][]byte, error) { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{}, + CallValue: big.NewInt(0), + GasProvided: math.MaxInt64, + }, + Function: "getContractConfig", + RecipientAddr: vm.ESDTSCAddress, + } + + output, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return nil, err + } + if len(output.ReturnData) != 4 { + return nil, fmt.Errorf("%w getContractConfig should have returned 4 values", epochStart.ErrInvalidSystemSCReturn) + } + + return output.ReturnData, nil +} + +func (s *legacySystemSCProcessor) changeESDTOwner(currentConfigValues [][]byte) error { + baseIssuingCost := currentConfigValues[1] + minTokenNameLength := currentConfigValues[2] + maxTokenNameLength := currentConfigValues[3] + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{s.esdtOwnerAddressBytes, baseIssuingCost, minTokenNameLength, maxTokenNameLength}, + CallValue: big.NewInt(0), + GasProvided: math.MaxUint64, + }, + Function: "configChange", + 
RecipientAddr: vm.ESDTSCAddress, + } + + output, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + if output.ReturnCode != vmcommon.Ok { + return fmt.Errorf("%w changeESDTOwner should have returned Ok", epochStart.ErrInvalidSystemSCReturn) + } + + return s.processSCOutputAccounts(output) +} + +func getRewardsMiniBlockForMeta(miniBlocks block.MiniBlockSlice) *block.MiniBlock { + for _, miniBlock := range miniBlocks { + if miniBlock.Type != block.RewardsBlock { + continue + } + if miniBlock.ReceiverShardID != core.MetachainShardId { + continue + } + return miniBlock + } + return nil +} + +func (s *legacySystemSCProcessor) legacyEpochConfirmed(epoch uint32) { + s.flagChangeMaxNodesEnabled.SetValue(false) + for _, maxNodesConfig := range s.maxNodesChangeConfigProvider.GetAllNodesConfig() { + if epoch == maxNodesConfig.EpochEnable { + s.flagChangeMaxNodesEnabled.SetValue(true) + break + } + } + s.maxNodes = s.maxNodesChangeConfigProvider.GetCurrentNodesConfig().MaxNumNodes + + log.Debug("legacySystemSC: change of maximum number of nodes and/or shuffling percentage", + "enabled", s.flagChangeMaxNodesEnabled.IsSet(), + "epoch", epoch, + "maxNodes", s.maxNodes, + ) +} diff --git a/epochStart/metachain/rewards.go b/epochStart/metachain/rewards.go index 3620070a6e0..0b279d56c32 100644 --- a/epochStart/metachain/rewards.go +++ b/epochStart/metachain/rewards.go @@ -50,7 +50,7 @@ func NewRewardsCreator(args ArgsNewRewardsCreator) (*rewardsCreator, error) { // CreateRewardsMiniBlocks creates the rewards miniblocks according to economics data and validator info func (rc *rewardsCreator) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { if check.IfNil(metaBlock) { @@ -116,7 +116,7 @@ func (rc *rewardsCreator) adjustProtocolSustainabilityRewards(protocolSustainabi } func (rc *rewardsCreator) addValidatorRewardsToMiniBlocks( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, metaBlock data.HeaderHandler, miniBlocks block.MiniBlockSlice, protocolSustainabilityRwdTx *rewardTx.RewardTx, @@ -162,41 +162,40 @@ func (rc *rewardsCreator) addValidatorRewardsToMiniBlocks( } func (rc *rewardsCreator) computeValidatorInfoPerRewardAddress( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, protocolSustainabilityRwd *rewardTx.RewardTx, epoch uint32, ) map[string]*rewardInfoData { rwdAddrValidatorInfo := make(map[string]*rewardInfoData) - for _, shardValidatorsInfo := range validatorsInfo { - for _, validatorInfo := range shardValidatorsInfo { - rewardsPerBlockPerNodeForShard := rc.mapBaseRewardsPerBlockPerValidator[validatorInfo.ShardId] - protocolRewardValue := big.NewInt(0).Mul(rewardsPerBlockPerNodeForShard, big.NewInt(0).SetUint64(uint64(validatorInfo.NumSelectedInSuccessBlocks))) + for _, validatorInfo := range validatorsInfo.GetAllValidatorsInfo() { + rewardsPerBlockPerNodeForShard := rc.mapBaseRewardsPerBlockPerValidator[validatorInfo.GetShardId()] + protocolRewardValue := big.NewInt(0).Mul(rewardsPerBlockPerNodeForShard, big.NewInt(0).SetUint64(uint64(validatorInfo.GetNumSelectedInSuccessBlocks()))) - isFix1Enabled := rc.isRewardsFix1Enabled(epoch) - if isFix1Enabled && validatorInfo.LeaderSuccess == 0 && validatorInfo.ValidatorSuccess == 0 { - 
protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) - continue - } - if !isFix1Enabled && validatorInfo.LeaderSuccess == 0 && validatorInfo.ValidatorFailure == 0 { - protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) - continue - } + isFix1Enabled := rc.isRewardsFix1Enabled(epoch) + if isFix1Enabled && validatorInfo.GetLeaderSuccess() == 0 && validatorInfo.GetValidatorSuccess() == 0 { + protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) + continue + } + if !isFix1Enabled && validatorInfo.GetLeaderSuccess() == 0 && validatorInfo.GetValidatorFailure() == 0 { + protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) + continue + } - rwdInfo, ok := rwdAddrValidatorInfo[string(validatorInfo.RewardAddress)] - if !ok { - rwdInfo = &rewardInfoData{ - accumulatedFees: big.NewInt(0), - rewardsFromProtocol: big.NewInt(0), - address: string(validatorInfo.RewardAddress), - } - rwdAddrValidatorInfo[string(validatorInfo.RewardAddress)] = rwdInfo + rwdInfo, ok := rwdAddrValidatorInfo[string(validatorInfo.GetRewardAddress())] + if !ok { + rwdInfo = &rewardInfoData{ + accumulatedFees: big.NewInt(0), + rewardsFromProtocol: big.NewInt(0), + address: string(validatorInfo.GetRewardAddress()), } - - rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, validatorInfo.AccumulatedFees) - rwdInfo.rewardsFromProtocol.Add(rwdInfo.rewardsFromProtocol, protocolRewardValue) + rwdAddrValidatorInfo[string(validatorInfo.GetRewardAddress())] = rwdInfo } + + rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, validatorInfo.GetAccumulatedFees()) + rwdInfo.rewardsFromProtocol.Add(rwdInfo.rewardsFromProtocol, protocolRewardValue) + } return rwdAddrValidatorInfo @@ -205,7 +204,7 @@ func (rc *rewardsCreator) computeValidatorInfoPerRewardAddress( // VerifyRewardsMiniBlocks verifies if received rewards miniblocks are correct func (rc *rewardsCreator) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { if check.IfNil(metaBlock) { diff --git a/epochStart/metachain/rewardsCreatorProxy.go b/epochStart/metachain/rewardsCreatorProxy.go index 6c183f43f7b..0e770c69629 100644 --- a/epochStart/metachain/rewardsCreatorProxy.go +++ b/epochStart/metachain/rewardsCreatorProxy.go @@ -64,7 +64,7 @@ func NewRewardsCreatorProxy(args RewardsCreatorProxyArgs) (*rewardsCreatorProxy, // CreateRewardsMiniBlocks proxies the CreateRewardsMiniBlocks method of the configured rewardsCreator instance func (rcp *rewardsCreatorProxy) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { err := rcp.changeRewardCreatorIfNeeded(metaBlock.GetEpoch()) @@ -77,7 +77,7 @@ func (rcp *rewardsCreatorProxy) CreateRewardsMiniBlocks( // VerifyRewardsMiniBlocks proxies the same method of the configured rewardsCreator instance func (rcp *rewardsCreatorProxy) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { err := rcp.changeRewardCreatorIfNeeded(metaBlock.GetEpoch()) diff --git a/epochStart/metachain/rewardsCreatorProxy_test.go 
b/epochStart/metachain/rewardsCreatorProxy_test.go index 6de5ac93a49..e41730d34f1 100644 --- a/epochStart/metachain/rewardsCreatorProxy_test.go +++ b/epochStart/metachain/rewardsCreatorProxy_test.go @@ -15,9 +15,11 @@ import ( "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" "github.com/stretchr/testify/require" ) @@ -55,9 +57,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithError(t *testing.T) { t.Parallel() expectedErr := fmt.Errorf("expectedError") - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return nil, expectedErr }, @@ -74,9 +76,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithError(t *testing.T) { func TestRewardsCreatorProxy_CreateRewardsMiniBlocksOK(t *testing.T) { t.Parallel() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return make(block.MiniBlockSlice, 2), nil }, @@ -93,9 +95,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksOK(t *testing.T) { func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV2(t *testing.T) { t.Parallel() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return make(block.MiniBlockSlice, 2), nil }, @@ -125,9 +127,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV2 func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV1(t *testing.T) { t.Parallel() - rewardCreatorV2 := &mock.RewardsCreatorStub{ + rewardCreatorV2 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return make(block.MiniBlockSlice, 2), nil }, @@ -159,9 +161,9 @@ func TestRewardsCreatorProxy_VerifyRewardsMiniBlocksWithError(t *testing.T) { 
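+	// The proxy is expected to pass the wrapped rewards creator's error through unchanged.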
t.Parallel() expectedErr := fmt.Errorf("expectedError") - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics) error { + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics) error { return expectedErr }, } @@ -176,9 +178,9 @@ func TestRewardsCreatorProxy_VerifyRewardsMiniBlocksWithError(t *testing.T) { func TestRewardsCreatorProxy_VerifyRewardsMiniBlocksOK(t *testing.T) { t.Parallel() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics) error { + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics) error { return nil }, } @@ -194,7 +196,7 @@ func TestRewardsCreatorProxy_GetProtocolSustainabilityRewards(t *testing.T) { t.Parallel() expectedValue := big.NewInt(12345) - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ GetProtocolSustainabilityRewardsCalled: func() *big.Int { return expectedValue }, @@ -210,7 +212,7 @@ func TestRewardsCreatorProxy_GetLocalTxCache(t *testing.T) { t.Parallel() expectedValue := &mock.TxForCurrentBlockStub{} - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ GetLocalTxCacheCalled: func() epochStart.TransactionCacher { return expectedValue }, @@ -228,7 +230,7 @@ func TestRewardsCreatorProxy_CreateMarshalizedData(t *testing.T) { expectedValue := make(map[string][][]byte) blockBody := createDefaultBlockBody() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateMarshalledDataCalled: func(body *block.Body) map[string][][]byte { if blockBody == body { return expectedValue @@ -252,7 +254,7 @@ func TestRewardsCreatorProxy_GetRewardsTxs(t *testing.T) { } blockBody := createDefaultBlockBody() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ GetRewardsTxsCalled: func(body *block.Body) map[string]data.TransactionHandler { if blockBody == body { return expectedValue @@ -273,7 +275,7 @@ func TestRewardsCreatorProxy_SaveTxBlockToStorage(t *testing.T) { blockBody := createDefaultBlockBody() functionCalled := false - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ SaveBlockDataToStorageCalled: func(metaBlock data.MetaHeaderHandler, body *block.Body) { functionCalled = true }, @@ -291,7 +293,7 @@ func TestRewardsCreatorProxy_DeleteTxsFromStorage(t *testing.T) { blockBody := createDefaultBlockBody() functionCalled := false - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ DeleteBlockDataFromStorageCalled: func(metaBlock data.MetaHeaderHandler, body *block.Body) { functionCalled = true }, @@ -309,7 +311,7 @@ func TestRewardsCreatorProxy_RemoveBlockDataFromPools(t *testing.T) { blockBody := createDefaultBlockBody() functionCalled := false - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ RemoveBlockDataFromPoolsCalled: func(metaBlock data.MetaHeaderHandler, body *block.Body) { functionCalled 
= true }, @@ -327,13 +329,13 @@ func TestRewardsCreatorProxy_IsInterfaceNil(t *testing.T) { var rewardsCreatorProxy epochStart.RewardsCreator require.True(t, check.IfNil(rewardsCreatorProxy)) - rewardCreatorV1 := &mock.RewardsCreatorStub{} + rewardCreatorV1 := &testscommon.RewardsCreatorStub{} rewardsCreatorProxy, _, _ = createTestData(rewardCreatorV1, rCreatorV1) require.False(t, check.IfNil(rewardsCreatorProxy)) } -func createTestData(rewardCreator *mock.RewardsCreatorStub, rcType configuredRewardsCreator) (*rewardsCreatorProxy, map[uint32][]*state.ValidatorInfo, *block.MetaBlock) { +func createTestData(rewardCreator epochStart.RewardsCreator, rcType configuredRewardsCreator) (*rewardsCreatorProxy, state.ShardValidatorsInfoMapHandler, *block.MetaBlock) { args := createDefaultRewardsCreatorProxyArgs() rewardsCreatorProxy := &rewardsCreatorProxy{ rc: rewardCreator, @@ -380,7 +382,7 @@ func createDefaultRewardsCreatorProxyArgs() RewardsCreatorProxyArgs { return RewardsCreatorProxyArgs{ BaseRewardsCreatorArgs: getBaseRewardsArguments(), - StakingDataProvider: &mock.StakingDataProviderStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, EconomicsDataProvider: NewEpochEconomicsStatistics(), RewardsHandler: rewardsHandler, } diff --git a/epochStart/metachain/rewardsV2.go b/epochStart/metachain/rewardsV2.go index 371f577b875..ddfc05abcfe 100644 --- a/epochStart/metachain/rewardsV2.go +++ b/epochStart/metachain/rewardsV2.go @@ -25,7 +25,7 @@ type nodeRewardsData struct { fullRewards *big.Int topUpStake *big.Int powerInShard *big.Int - valInfo *state.ValidatorInfo + valInfo state.ValidatorInfoHandler } // RewardsCreatorArgsV2 holds the data required to create end of epoch rewards @@ -75,7 +75,7 @@ func NewRewardsCreatorV2(args RewardsCreatorArgsV2) (*rewardsCreatorV2, error) { // stake top-up values per node func (rc *rewardsCreatorV2) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { if check.IfNil(metaBlock) { @@ -151,7 +151,7 @@ func (rc *rewardsCreatorV2) adjustProtocolSustainabilityRewards(protocolSustaina // VerifyRewardsMiniBlocks verifies if received rewards miniblocks are correct func (rc *rewardsCreatorV2) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { if check.IfNil(metaBlock) { @@ -222,23 +222,23 @@ func (rc *rewardsCreatorV2) computeValidatorInfoPerRewardAddress( for _, nodeInfoList := range nodesRewardInfo { for _, nodeInfo := range nodeInfoList { - if nodeInfo.valInfo.LeaderSuccess == 0 && nodeInfo.valInfo.ValidatorSuccess == 0 { + if nodeInfo.valInfo.GetLeaderSuccess() == 0 && nodeInfo.valInfo.GetValidatorSuccess() == 0 { accumulatedUnassigned.Add(accumulatedUnassigned, nodeInfo.fullRewards) continue } - rwdInfo, ok := rwdAddrValidatorInfo[string(nodeInfo.valInfo.RewardAddress)] + rwdInfo, ok := rwdAddrValidatorInfo[string(nodeInfo.valInfo.GetRewardAddress())] if !ok { rwdInfo = &rewardInfoData{ accumulatedFees: big.NewInt(0), rewardsFromProtocol: big.NewInt(0), - address: string(nodeInfo.valInfo.RewardAddress), + address: string(nodeInfo.valInfo.GetRewardAddress()), } - rwdAddrValidatorInfo[string(nodeInfo.valInfo.RewardAddress)] = rwdInfo + rwdAddrValidatorInfo[string(nodeInfo.valInfo.GetRewardAddress())] = rwdInfo } 
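+			// Fees and protocol rewards are accumulated per reward address, so an address that
+			// controls several nodes collects the combined amount in a single reward entry.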
- distributedLeaderFees.Add(distributedLeaderFees, nodeInfo.valInfo.AccumulatedFees) - rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, nodeInfo.valInfo.AccumulatedFees) + distributedLeaderFees.Add(distributedLeaderFees, nodeInfo.valInfo.GetAccumulatedFees()) + rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, nodeInfo.valInfo.GetAccumulatedFees()) rwdInfo.rewardsFromProtocol.Add(rwdInfo.rewardsFromProtocol, nodeInfo.fullRewards) } } @@ -263,7 +263,7 @@ func (rc *rewardsCreatorV2) IsInterfaceNil() bool { } func (rc *rewardsCreatorV2) computeRewardsPerNode( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, ) (map[uint32][]*nodeRewardsData, *big.Int) { var baseRewardsPerBlock *big.Int @@ -302,11 +302,11 @@ func (rc *rewardsCreatorV2) computeRewardsPerNode( } func (rc *rewardsCreatorV2) initNodesRewardsInfo( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, ) map[uint32][]*nodeRewardsData { nodesRewardsInfo := make(map[uint32][]*nodeRewardsData) - for shardID, valInfoList := range validatorsInfo { + for shardID, valInfoList := range validatorsInfo.GetShardValidatorsInfoMap() { nodesRewardsInfo[shardID] = make([]*nodeRewardsData, 0, len(valInfoList)) for _, valInfo := range valInfoList { if validatorInfo.WasEligibleInCurrentEpoch(valInfo) { @@ -336,7 +336,7 @@ func (rc *rewardsCreatorV2) computeBaseRewardsPerNode( for _, nodeRewardsInfo := range nodeRewardsInfoList { nodeRewardsInfo.baseReward = big.NewInt(0).Mul( rc.mapBaseRewardsPerBlockPerValidator[shardID], - big.NewInt(int64(nodeRewardsInfo.valInfo.NumSelectedInSuccessBlocks))) + big.NewInt(int64(nodeRewardsInfo.valInfo.GetNumSelectedInSuccessBlocks()))) accumulatedRewards.Add(accumulatedRewards, nodeRewardsInfo.baseReward) } } @@ -507,13 +507,13 @@ func computeNodesPowerInShard( // power in epoch is computed as nbBlocks*nodeTopUp, where nbBlocks represents the number of blocks the node // participated at creation/validation -func computeNodePowerInShard(nodeInfo *state.ValidatorInfo, nodeTopUp *big.Int) *big.Int { +func computeNodePowerInShard(nodeInfo state.ValidatorInfoHandler, nodeTopUp *big.Int) *big.Int { // if node was offline, it had no power, so the rewards should go to the others - if nodeInfo.LeaderSuccess == 0 && nodeInfo.ValidatorSuccess == 0 { + if nodeInfo.GetLeaderSuccess() == 0 && nodeInfo.GetValidatorSuccess() == 0 { return big.NewInt(0) } - nbBlocks := big.NewInt(0).SetUint64(uint64(nodeInfo.NumSelectedInSuccessBlocks)) + nbBlocks := big.NewInt(0).SetUint64(uint64(nodeInfo.GetNumSelectedInSuccessBlocks())) return big.NewInt(0).Mul(nbBlocks, nodeTopUp) } diff --git a/epochStart/metachain/rewardsV2_test.go b/epochStart/metachain/rewardsV2_test.go index 48d9564b7aa..7abea51dea3 100644 --- a/epochStart/metachain/rewardsV2_test.go +++ b/epochStart/metachain/rewardsV2_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" "github.com/stretchr/testify/require" ) @@ -106,12 +107,12 @@ func TestNewRewardsCreatorV2_initNodesRewardsInfo(t *testing.T) { valInfoEligibleWithExtra := addNonEligibleValidatorInfo(100, valInfoEligible, string(common.WaitingList)) nodesRewardInfo := rwd.initNodesRewardsInfo(valInfoEligibleWithExtra) - 
require.Equal(t, len(valInfoEligible), len(nodesRewardInfo)) + require.Equal(t, len(valInfoEligible.GetShardValidatorsInfoMap()), len(nodesRewardInfo)) for shardID, nodeInfoList := range nodesRewardInfo { - require.Equal(t, len(nodeInfoList), len(valInfoEligible[shardID])) + require.Equal(t, len(nodeInfoList), len(valInfoEligible.GetShardValidatorsInfoMap()[shardID])) for i, nodeInfo := range nodeInfoList { - require.True(t, valInfoEligible[shardID][i] == nodeInfo.valInfo) + require.True(t, valInfoEligible.GetShardValidatorsInfoMap()[shardID][i] == nodeInfo.valInfo) require.Equal(t, zero, nodeInfo.topUpStake) require.Equal(t, zero, nodeInfo.powerInShard) require.Equal(t, zero, nodeInfo.baseReward) @@ -126,7 +127,7 @@ func TestNewRewardsCreatorV2_getTopUpForAllEligibleNodes(t *testing.T) { args := getRewardsCreatorV2Arguments() topUpVal, _ := big.NewInt(0).SetString("100000000000000000000", 10) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { topUp := big.NewInt(0).Set(topUpVal) return topUp, nil @@ -155,7 +156,7 @@ func TestNewRewardsCreatorV2_getTopUpForAllEligibleSomeBLSKeysNotFoundZeroed(t * args := getRewardsCreatorV2Arguments() topUpVal, _ := big.NewInt(0).SetString("100000000000000000000", 10) notFoundKey := []byte("notFound") - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { if bytes.Equal(blsKey, notFoundKey) { return nil, fmt.Errorf("not found") @@ -170,9 +171,9 @@ func TestNewRewardsCreatorV2_getTopUpForAllEligibleSomeBLSKeysNotFoundZeroed(t * nodesPerShard := uint32(10) valInfo := createDefaultValidatorInfo(nodesPerShard, args.ShardCoordinator, args.NodesConfigProvider, 100, defaultBlocksPerShard) - for _, valList := range valInfo { - valList[0].PublicKey = notFoundKey - valList[1].PublicKey = notFoundKey + for _, valList := range valInfo.GetShardValidatorsInfoMap() { + valList[0].SetPublicKey(notFoundKey) + valList[1].SetPublicKey(notFoundKey) } nodesRewardInfo := rwd.initNodesRewardsInfo(valInfo) @@ -387,7 +388,7 @@ func TestNewRewardsCreatorV2_computeNodesPowerInShard(t *testing.T) { for _, nodeInfoList := range nodesRewardInfo { for _, nodeInfo := range nodeInfoList { - blocks := nodeInfo.valInfo.NumSelectedInSuccessBlocks + blocks := nodeInfo.valInfo.GetNumSelectedInSuccessBlocks() topUp := nodeInfo.topUpStake require.Equal(t, big.NewInt(0).Mul(big.NewInt(int64(blocks)), topUp), nodeInfo.powerInShard) } @@ -607,11 +608,11 @@ func TestNewRewardsCreatorV2_computeTopUpRewardsPerNode(t *testing.T) { nodesRewardInfo := dummyRwd.initNodesRewardsInfo(vInfo) _, _ = setDummyValuesInNodesRewardInfo(nodesRewardInfo, nbEligiblePerShard, tuStake, 0) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -653,7 +654,7 @@ func TestNewRewardsCreatorV2_computeTopUpRewardsPerNodeNotFoundBLSKeys(t *testin args := getRewardsCreatorV2Arguments() nbEligiblePerShard := uint32(400) vInfo := 
createDefaultValidatorInfo(nbEligiblePerShard, args.ShardCoordinator, args.NodesConfigProvider, 100, defaultBlocksPerShard) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { return nil, fmt.Errorf("not found") }, @@ -737,15 +738,15 @@ func TestNewRewardsCreatorV2_computeRewardsPerNode(t *testing.T) { nodesRewardInfo := dummyRwd.initNodesRewardsInfo(vInfo) _, totalTopUpStake := setDummyValuesInNodesRewardInfo(nodesRewardInfo, nbEligiblePerShard, tuStake, 0) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { topUpStake := big.NewInt(0).Set(totalTopUpStake) return topUpStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1042,7 +1043,7 @@ func TestNewRewardsCreatorV35_computeRewardsPer3200NodesWithDifferentTopups(t *t nodesRewardInfo, _ := setupNodeRewardInfo(setupResult, vInfo, topupStakePerNode, tt.validatorTopupStake) setupResult.EconomicsDataProvider.SetRewardsToBeDistributedForBlocks(setupResult.rewardsForBlocks) - setupResult.RewardsCreatorArgsV2.StakingDataProvider = &mock.StakingDataProviderStub{ + setupResult.RewardsCreatorArgsV2.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return topupEligibleStake }, @@ -1050,9 +1051,9 @@ func TestNewRewardsCreatorV35_computeRewardsPer3200NodesWithDifferentTopups(t *t return baseEligibleStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1149,7 +1150,7 @@ func TestNewRewardsCreatorV2_computeRewardsPer3200NodesWithDifferentTopups(t *te nodesRewardInfo, _ := setupNodeRewardInfo(setupResult, vInfo, topupStakePerNode, tt.validatorTopupStake) setupResult.EconomicsDataProvider.SetRewardsToBeDistributedForBlocks(setupResult.rewardsForBlocks) - setupResult.RewardsCreatorArgsV2.StakingDataProvider = &mock.StakingDataProviderStub{ + setupResult.RewardsCreatorArgsV2.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return topupEligibleStake }, @@ -1157,9 +1158,9 @@ func TestNewRewardsCreatorV2_computeRewardsPer3200NodesWithDifferentTopups(t *te return baseEligibleStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1200,7 +1201,7 @@ func TestNewRewardsCreatorV2_computeRewardsPer3200NodesWithDifferentTopups(t *te func setupNodeRewardInfo( setupResult SetupRewardsResult, - vInfo map[uint32][]*state.ValidatorInfo, + vInfo state.ShardValidatorsInfoMapHandler, topupStakePerNode 
*big.Int, validatorTopupStake *big.Int, ) (map[uint32][]*nodeRewardsData, error) { @@ -1267,7 +1268,7 @@ func computeRewardsAndDust(nbEligiblePerShard uint32, args SetupRewardsResult, t totalEligibleStake, _ := big.NewInt(0).SetString("4000000"+"000000000000000000", 10) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return totalTopUpStake }, @@ -1275,9 +1276,9 @@ func computeRewardsAndDust(nbEligiblePerShard uint32, args SetupRewardsResult, t return totalEligibleStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1360,11 +1361,11 @@ func TestNewRewardsCreatorV2_computeValidatorInfoPerRewardAddressWithOfflineVali nbShards := int64(args.ShardCoordinator.NumberOfShards()) + 1 args.EconomicsDataProvider.SetLeadersFees(big.NewInt(0).Mul(big.NewInt(int64(proposerFee)), big.NewInt(int64(nbEligiblePerShard-nbOfflinePerShard)*nbShards))) valInfo := createDefaultValidatorInfo(nbEligiblePerShard, args.ShardCoordinator, args.NodesConfigProvider, proposerFee, defaultBlocksPerShard) - for _, valList := range valInfo { + for _, valList := range valInfo.GetShardValidatorsInfoMap() { for i := 0; i < int(nbOfflinePerShard); i++ { - valList[i].LeaderSuccess = 0 - valList[i].ValidatorSuccess = 0 - valList[i].AccumulatedFees = big.NewInt(0) + valList[i].SetLeaderSuccess(0) + valList[i].SetValidatorSuccess(0) + valList[i].SetAccumulatedFees(big.NewInt(0)) } } @@ -1412,9 +1413,9 @@ func TestNewRewardsCreatorV2_computeValidatorInfoPerRewardAddressWithLeavingVali nbShards := int64(args.ShardCoordinator.NumberOfShards()) + 1 args.EconomicsDataProvider.SetLeadersFees(big.NewInt(0).Mul(big.NewInt(int64(proposerFee)), big.NewInt(int64(nbEligiblePerShard)*nbShards))) valInfo := createDefaultValidatorInfo(nbEligiblePerShard, args.ShardCoordinator, args.NodesConfigProvider, proposerFee, defaultBlocksPerShard) - for _, valList := range valInfo { + for _, valList := range valInfo.GetShardValidatorsInfoMap() { for i := 0; i < int(nbLeavingPerShard); i++ { - valList[i].List = string(common.LeavingList) + valList[i].SetList(string(common.LeavingList)) } } @@ -1500,10 +1501,8 @@ func TestNewRewardsCreatorV2_addValidatorRewardsToMiniBlocks(t *testing.T) { DevFeesInEpoch: big.NewInt(0), } sumFees := big.NewInt(0) - for _, vInfoList := range valInfo { - for _, vInfo := range vInfoList { - sumFees.Add(sumFees, vInfo.AccumulatedFees) - } + for _, vInfo := range valInfo.GetAllValidatorsInfo() { + sumFees.Add(sumFees, vInfo.GetAccumulatedFees()) } accumulatedDust, err := rwd.addValidatorRewardsToMiniBlocks(metaBlock, miniBlocks, nodesRewardInfo) @@ -1548,12 +1547,12 @@ func TestNewRewardsCreatorV2_addValidatorRewardsToMiniBlocksAddressInMetaChainDe nbAddrInMetachainPerShard := 2 sumFees := big.NewInt(0) - for _, vInfoList := range valInfo { + for _, vInfoList := range valInfo.GetShardValidatorsInfoMap() { for i, vInfo := range vInfoList { if i < nbAddrInMetachainPerShard { - vInfo.RewardAddress = addrInMeta + vInfo.SetRewardAddress(addrInMeta) } - sumFees.Add(sumFees, vInfo.AccumulatedFees) + sumFees.Add(sumFees, vInfo.GetAccumulatedFees()) } } @@ -1585,15 +1584,15 @@ func 
TestNewRewardsCreatorV2_CreateRewardsMiniBlocks(t *testing.T) { nodesRewardInfo := dummyRwd.initNodesRewardsInfo(vInfo) _, _ = setDummyValuesInNodesRewardInfo(nodesRewardInfo, nbEligiblePerShard, tuStake, 0) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { totalTopUpStake, _ := big.NewInt(0).SetString("3000000000000000000000000", 10) return totalTopUpStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1637,10 +1636,8 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks(t *testing.T) { } sumFees := big.NewInt(0) - for _, vInfoList := range vInfo { - for _, v := range vInfoList { - sumFees.Add(sumFees, v.AccumulatedFees) - } + for _, v := range vInfo.GetAllValidatorsInfo() { + sumFees.Add(sumFees, v.GetAccumulatedFees()) } totalRws := rwd.economicsDataProvider.RewardsToBeDistributedForBlocks() @@ -1683,14 +1680,14 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks2169Nodes(t *testing.T) { topupValue.Mul(topupValue, multiplier) _, totalTopupStake := setValuesInNodesRewardInfo(nodesRewardInfo, topupValue, tuStake) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return totalTopupStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1734,10 +1731,8 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks2169Nodes(t *testing.T) { } sumFees := big.NewInt(0) - for _, vInfoList := range vInfo { - for _, v := range vInfoList { - sumFees.Add(sumFees, v.AccumulatedFees) - } + for _, v := range vInfo.GetAllValidatorsInfo() { + sumFees.Add(sumFees, v.GetAccumulatedFees()) } totalRws := rwd.economicsDataProvider.RewardsToBeDistributedForBlocks() @@ -1781,7 +1776,7 @@ func getRewardsCreatorV2Arguments() RewardsCreatorArgsV2 { } return RewardsCreatorArgsV2{ BaseRewardsCreatorArgs: getBaseRewardsArguments(), - StakingDataProvider: &mock.StakingDataProviderStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, EconomicsDataProvider: NewEpochEconomicsStatistics(), RewardsHandler: rewardsHandler, } @@ -1801,7 +1796,7 @@ func getRewardsCreatorV35Arguments() RewardsCreatorArgsV2 { } return RewardsCreatorArgsV2{ BaseRewardsCreatorArgs: getBaseRewardsArguments(), - StakingDataProvider: &mock.StakingDataProviderStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, EconomicsDataProvider: NewEpochEconomicsStatistics(), RewardsHandler: rewardsHandler, } @@ -1877,7 +1872,7 @@ func createDefaultValidatorInfo( nodesConfigProvider epochStart.NodesConfigProvider, proposerFeesPerNode uint32, nbBlocksPerShard uint32, -) map[uint32][]*state.ValidatorInfo { +) state.ShardValidatorsInfoMapHandler { cGrShard := uint32(nodesConfigProvider.ConsensusGroupSize(0)) cGrMeta := 
uint32(nodesConfigProvider.ConsensusGroupSize(core.MetachainShardId)) nbBlocksSelectedNodeInShard := nbBlocksPerShard * cGrShard / eligibleNodesPerShard @@ -1886,9 +1881,8 @@ func createDefaultValidatorInfo( shardsMap := createShardsMap(shardCoordinator) var nbBlocksSelected uint32 - validators := make(map[uint32][]*state.ValidatorInfo) + validators := state.NewShardValidatorsInfoMap() for shardID := range shardsMap { - validators[shardID] = make([]*state.ValidatorInfo, eligibleNodesPerShard) nbBlocksSelected = nbBlocksSelectedNodeInShard if shardID == core.MetachainShardId { nbBlocksSelected = nbBlocksSelectedNodeInMeta @@ -1900,7 +1894,7 @@ func createDefaultValidatorInfo( _ = hex.Encode(addrHex, []byte(str)) leaderSuccess := uint32(20) - validators[shardID][i] = &state.ValidatorInfo{ + _ = validators.Add(&state.ValidatorInfo{ PublicKey: []byte(fmt.Sprintf("pubKeyBLS%d%d", shardID, i)), ShardId: shardID, RewardAddress: addrHex, @@ -1909,7 +1903,7 @@ func createDefaultValidatorInfo( NumSelectedInSuccessBlocks: nbBlocksSelected, AccumulatedFees: big.NewInt(int64(proposerFeesPerNode)), List: string(common.EligibleList), - } + }) } } @@ -1918,13 +1912,14 @@ func createDefaultValidatorInfo( func addNonEligibleValidatorInfo( nonEligiblePerShard uint32, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, list string, -) map[uint32][]*state.ValidatorInfo { - resultedValidatorsInfo := make(map[uint32][]*state.ValidatorInfo) - for shardID, valInfoList := range validatorsInfo { +) state.ShardValidatorsInfoMapHandler { + resultedValidatorsInfo := state.NewShardValidatorsInfoMap() + for shardID, valInfoList := range validatorsInfo.GetShardValidatorsInfoMap() { + _ = resultedValidatorsInfo.SetValidatorsInShard(shardID, valInfoList) for i := uint32(0); i < nonEligiblePerShard; i++ { - vInfo := &state.ValidatorInfo{ + _ = resultedValidatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte(fmt.Sprintf("pubKeyBLSExtra%d", i)), ShardId: shardID, RewardAddress: []byte(fmt.Sprintf("addrRewardsExtra%d", i)), @@ -1933,8 +1928,7 @@ func addNonEligibleValidatorInfo( NumSelectedInSuccessBlocks: 1, AccumulatedFees: big.NewInt(int64(10)), List: list, - } - resultedValidatorsInfo[shardID] = append(valInfoList, vInfo) + }) } } diff --git a/epochStart/metachain/rewards_test.go b/epochStart/metachain/rewards_test.go index a41355bef67..b40fe8882e9 100644 --- a/epochStart/metachain/rewards_test.go +++ b/epochStart/metachain/rewards_test.go @@ -136,14 +136,12 @@ func TestRewardsCreator_CreateRewardsMiniBlocks(t *testing.T) { EpochStart: getDefaultEpochStart(), DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + }) bdy, err := rwd.CreateRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Nil(t, err) assert.NotNil(t, bdy) @@ -178,14 +176,12 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksHashDoesNotMatch(t *testing.T) { }, DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + 
PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Equal(t, epochStart.ErrRewardMiniBlockHashDoesNotMatch, err) @@ -236,15 +232,13 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksRewardsMbNumDoesNotMatch(t *testi mbh.Hash = mbHash mb.MiniBlockHeaders = []block.MiniBlockHeader{mbh, mbh} - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Equal(t, epochStart.ErrRewardMiniBlocksNumDoesNotMatch, err) @@ -393,15 +387,13 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksShouldWork(t *testing.T) { mb.EpochStart.Economics.RewardsForProtocolSustainability.Set(protocolSustainabilityRewardTx.Value) mb.EpochStart.Economics.TotalToDistribute.Set(big.NewInt(0).Add(rwdTx.Value, protocolSustainabilityRewardTx.Value)) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Nil(t, err) @@ -463,15 +455,13 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksShouldWorkEvenIfNotAllShardsHaveR mb.EpochStart.Economics.RewardsForProtocolSustainability.Set(protocolSustainabilityRewardTx.Value) mb.EpochStart.Economics.TotalToDistribute.Set(big.NewInt(0).Add(rwdTx.Value, protocolSustainabilityRewardTx.Value)) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: receivedShardID, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: receivedShardID, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Nil(t, err) @@ -487,14 +477,12 @@ func TestRewardsCreator_CreateMarshalizedData(t *testing.T) { EpochStart: getDefaultEpochStart(), DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + }) _, _ = rwd.CreateRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) rwdTx := rewardTx.RewardTx{ @@ -544,15 +532,13 @@ func TestRewardsCreator_SaveTxBlockToStorage(t *testing.T) { EpochStart: getDefaultEpochStart(), DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - 
} + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) _, _ = rwd.CreateRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) mb2 := block.MetaBlock{ @@ -613,15 +599,13 @@ func TestRewardsCreator_addValidatorRewardsToMiniBlocks(t *testing.T) { expectedRwdTxHash, _ := core.CalculateHash(&marshal.JsonMarshalizer{}, &hashingMocks.HasherMock{}, expectedRwdTx) cloneMb.TxHashes = append(cloneMb.TxHashes, expectedRwdTxHash) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) rwdc.fillBaseRewardsPerBlockPerNode(mb.EpochStart.Economics.RewardsPerBlock) err := rwdc.addValidatorRewardsToMiniBlocks(valInfo, mb, miniBlocks, &rewardTx.RewardTx{}) @@ -648,25 +632,21 @@ func TestRewardsCreator_ProtocolRewardsForValidatorFromMultipleShards(t *testing } pubkey := "pubkey" - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - RewardAddress: []byte(pubkey), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 100, - LeaderSuccess: 1, - }, - } - valInfo[core.MetachainShardId] = []*state.ValidatorInfo{ - { - RewardAddress: []byte(pubkey), - ShardId: core.MetachainShardId, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 200, - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: []byte(pubkey), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 100, + LeaderSuccess: 1, + }) + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: []byte(pubkey), + ShardId: core.MetachainShardId, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 200, + LeaderSuccess: 1, + }) rwdc.fillBaseRewardsPerBlockPerNode(mb.EpochStart.Economics.RewardsPerBlock) rwdInfoData := rwdc.computeValidatorInfoPerRewardAddress(valInfo, &rewardTx.RewardTx{}, 0) @@ -675,8 +655,8 @@ func TestRewardsCreator_ProtocolRewardsForValidatorFromMultipleShards(t *testing assert.Equal(t, rwdInfo.address, pubkey) assert.Equal(t, rwdInfo.accumulatedFees.Cmp(big.NewInt(200)), 0) - protocolRewards := uint64(valInfo[0][0].NumSelectedInSuccessBlocks) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(0))) - protocolRewards += uint64(valInfo[core.MetachainShardId][0].NumSelectedInSuccessBlocks) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(core.MetachainShardId))) + protocolRewards := uint64(valInfo.GetShardValidatorsInfoMap()[0][0].GetNumSelectedInSuccessBlocks()) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(0))) + protocolRewards += uint64(valInfo.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetNumSelectedInSuccessBlocks()) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(core.MetachainShardId))) assert.Equal(t, rwdInfo.rewardsFromProtocol.Uint64(), protocolRewards) } @@ -730,7 +710,7 @@ func 
TestRewardsCreator_AddProtocolSustainabilityRewardToMiniBlocks(t *testing.T metaBlk.EpochStart.Economics.RewardsForProtocolSustainability.Set(expectedRewardTx.Value) metaBlk.EpochStart.Economics.TotalToDistribute.Set(expectedRewardTx.Value) - miniBlocks, err := rwdc.CreateRewardsMiniBlocks(metaBlk, make(map[uint32][]*state.ValidatorInfo), &metaBlk.EpochStart.Economics) + miniBlocks, err := rwdc.CreateRewardsMiniBlocks(metaBlk, state.NewShardValidatorsInfoMap(), &metaBlk.EpochStart.Economics) assert.Nil(t, err) assert.Equal(t, cloneMb, miniBlocks[0]) } @@ -747,23 +727,21 @@ func TestRewardsCreator_ValidatorInfoWithMetaAddressAddedToProtocolSustainabilit DevFeesInEpoch: big.NewInt(0), } metaBlk.EpochStart.Economics.TotalToDistribute = big.NewInt(20250) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - RewardAddress: vm.StakingSCAddress, - ShardId: 0, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 1, - LeaderSuccess: 1, - }, - { - RewardAddress: vm.FirstDelegationSCAddress, - ShardId: 0, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 1, - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: vm.StakingSCAddress, + ShardId: 0, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 1, + LeaderSuccess: 1, + }) + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: vm.FirstDelegationSCAddress, + ShardId: 0, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 1, + LeaderSuccess: 1, + }) acc, _ := args.UserAccountsDB.LoadAccount(vm.FirstDelegationSCAddress) userAcc, _ := acc.(state.UserAccountHandler) diff --git a/epochStart/metachain/stakingDataProvider.go b/epochStart/metachain/stakingDataProvider.go index bf3faf572b3..aa0129a6f1a 100644 --- a/epochStart/metachain/stakingDataProvider.go +++ b/epochStart/metachain/stakingDataProvider.go @@ -16,46 +16,73 @@ import ( ) type ownerStats struct { - numEligible int - numStakedNodes int64 - topUpValue *big.Int - totalStaked *big.Int - eligibleBaseStake *big.Int - eligibleTopUpStake *big.Int - topUpPerNode *big.Int - blsKeys [][]byte + numEligible int + numStakedNodes int64 + numActiveNodes int64 + totalTopUp *big.Int + topUpPerNode *big.Int + totalStaked *big.Int + eligibleBaseStake *big.Int + eligibleTopUpStake *big.Int + eligibleTopUpPerNode *big.Int + blsKeys [][]byte + auctionList []state.ValidatorInfoHandler + qualified bool +} + +type ownerInfoSC struct { + topUpValue *big.Int + totalStakedValue *big.Int + numStakedWaiting *big.Int + blsKeys [][]byte } type stakingDataProvider struct { - mutStakingData sync.RWMutex - cache map[string]*ownerStats - systemVM vmcommon.VMExecutionHandler - totalEligibleStake *big.Int - totalEligibleTopUpStake *big.Int - minNodePrice *big.Int + mutStakingData sync.RWMutex + cache map[string]*ownerStats + systemVM vmcommon.VMExecutionHandler + totalEligibleStake *big.Int + totalEligibleTopUpStake *big.Int + minNodePrice *big.Int + numOfValidatorsInCurrEpoch uint32 + enableEpochsHandler common.EnableEpochsHandler + validatorStatsInEpoch epochStart.ValidatorStatsInEpoch +} + +// StakingDataProviderArgs is a struct placeholder for all arguments required to create a NewStakingDataProvider +type StakingDataProviderArgs struct { + EnableEpochsHandler common.EnableEpochsHandler + SystemVM vmcommon.VMExecutionHandler + MinNodePrice string } // NewStakingDataProvider will create a new instance of a staking data provider able to aid in the 
final rewards // computation as this will retrieve the staking data from the system VM -func NewStakingDataProvider( - systemVM vmcommon.VMExecutionHandler, - minNodePrice string, -) (*stakingDataProvider, error) { - if check.IfNil(systemVM) { +func NewStakingDataProvider(args StakingDataProviderArgs) (*stakingDataProvider, error) { + if check.IfNil(args.SystemVM) { return nil, epochStart.ErrNilSystemVmInstance } + if check.IfNil(args.EnableEpochsHandler) { + return nil, epochStart.ErrNilEnableEpochsHandler + } - nodePrice, ok := big.NewInt(0).SetString(minNodePrice, 10) + nodePrice, ok := big.NewInt(0).SetString(args.MinNodePrice, 10) if !ok || nodePrice.Cmp(big.NewInt(0)) <= 0 { return nil, epochStart.ErrInvalidMinNodePrice } sdp := &stakingDataProvider{ - systemVM: systemVM, + systemVM: args.SystemVM, cache: make(map[string]*ownerStats), minNodePrice: nodePrice, totalEligibleStake: big.NewInt(0), totalEligibleTopUpStake: big.NewInt(0), + enableEpochsHandler: args.EnableEpochsHandler, + validatorStatsInEpoch: epochStart.ValidatorStatsInEpoch{ + Eligible: make(map[uint32]int), + Waiting: make(map[uint32]int), + Leaving: make(map[uint32]int), + }, } return sdp, nil @@ -67,6 +94,12 @@ func (sdp *stakingDataProvider) Clean() { sdp.cache = make(map[string]*ownerStats) sdp.totalEligibleStake.SetInt64(0) sdp.totalEligibleTopUpStake.SetInt64(0) + sdp.numOfValidatorsInCurrEpoch = 0 + sdp.validatorStatsInEpoch = epochStart.ValidatorStatsInEpoch{ + Eligible: make(map[uint32]int), + Waiting: make(map[uint32]int), + Leaving: make(map[uint32]int), + } sdp.mutStakingData.Unlock() } @@ -91,7 +124,7 @@ func (sdp *stakingDataProvider) GetTotalTopUpStakeEligibleNodes() *big.Int { // GetNodeStakedTopUp returns the owner of provided bls key staking stats for the current epoch func (sdp *stakingDataProvider) GetNodeStakedTopUp(blsKey []byte) (*big.Int, error) { - owner, err := sdp.getBlsKeyOwner(blsKey) + owner, err := sdp.GetBlsKeyOwner(blsKey) if err != nil { log.Debug("GetOwnerStakingStats", "key", hex.EncodeToString(blsKey), "error", err) return nil, err @@ -102,19 +135,17 @@ func (sdp *stakingDataProvider) GetNodeStakedTopUp(blsKey []byte) (*big.Int, err return nil, epochStart.ErrOwnerDoesntHaveEligibleNodesInEpoch } - return ownerInfo.topUpPerNode, nil + return ownerInfo.eligibleTopUpPerNode, nil } -// PrepareStakingDataForRewards prepares the staking data for the given map of node keys per shard -func (sdp *stakingDataProvider) PrepareStakingDataForRewards(keys map[uint32][][]byte) error { +// PrepareStakingData prepares the staking data for the given map of node keys per shard +func (sdp *stakingDataProvider) PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler) error { sdp.Clean() - for _, keysList := range keys { - for _, blsKey := range keysList { - err := sdp.loadDataForBlsKey(blsKey) - if err != nil { - return err - } + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + err := sdp.loadDataForBlsKey(validator) + if err != nil { + return err } } @@ -146,7 +177,7 @@ func (sdp *stakingDataProvider) processStakingData() { totalEligibleStake.Add(totalEligibleStake, ownerEligibleStake) totalEligibleTopUpStake.Add(totalEligibleTopUpStake, owner.eligibleTopUpStake) - owner.topUpPerNode = big.NewInt(0).Div(owner.eligibleTopUpStake, ownerEligibleNodes) + owner.eligibleTopUpPerNode = big.NewInt(0).Div(owner.eligibleTopUpStake, ownerEligibleNodes) } sdp.totalEligibleTopUpStake = totalEligibleTopUpStake @@ -154,40 +185,49 @@ func (sdp *stakingDataProvider) processStakingData() { 
} // FillValidatorInfo will fill the validator info for the bls key if it was not already filled -func (sdp *stakingDataProvider) FillValidatorInfo(blsKey []byte) error { +func (sdp *stakingDataProvider) FillValidatorInfo(validator state.ValidatorInfoHandler) error { sdp.mutStakingData.Lock() defer sdp.mutStakingData.Unlock() - _, err := sdp.getAndFillOwnerStatsFromSC(blsKey) + _, err := sdp.getAndFillOwnerStats(validator) return err } -func (sdp *stakingDataProvider) getAndFillOwnerStatsFromSC(blsKey []byte) (*ownerStats, error) { - owner, err := sdp.getBlsKeyOwner(blsKey) +func (sdp *stakingDataProvider) getAndFillOwnerStats(validator state.ValidatorInfoHandler) (*ownerStats, error) { + blsKey := validator.GetPublicKey() + owner, err := sdp.GetBlsKeyOwner(blsKey) if err != nil { log.Debug("error fill owner stats", "step", "get owner from bls", "key", hex.EncodeToString(blsKey), "error", err) return nil, err } - ownerData, err := sdp.getValidatorData(owner) + ownerData, err := sdp.fillOwnerData(owner, validator) if err != nil { log.Debug("error fill owner stats", "step", "get owner data", "key", hex.EncodeToString(blsKey), "owner", hex.EncodeToString([]byte(owner)), "error", err) return nil, err } + if isValidator(validator) { + sdp.numOfValidatorsInCurrEpoch++ + } + + sdp.updateEpochStats(validator) return ownerData, nil } // loadDataForBlsKey will be called for each BLS key that took part in the consensus (no matter the shard ID) so the // staking data can be recovered from the staking system smart contracts. // The function will error if something went wrong. It does change the inner state of the called instance. -func (sdp *stakingDataProvider) loadDataForBlsKey(blsKey []byte) error { +func (sdp *stakingDataProvider) loadDataForBlsKey(validator state.ValidatorInfoHandler) error { sdp.mutStakingData.Lock() defer sdp.mutStakingData.Unlock() - ownerData, err := sdp.getAndFillOwnerStatsFromSC(blsKey) + ownerData, err := sdp.getAndFillOwnerStats(validator) if err != nil { - log.Debug("error computing rewards for bls key", "step", "get owner data", "key", hex.EncodeToString(blsKey), "error", err) + log.Debug("error computing rewards for bls key", + "step", "get owner data", + "key", hex.EncodeToString(validator.GetPublicKey()), + "error", err) return err } ownerData.numEligible++ @@ -195,7 +235,29 @@ func (sdp *stakingDataProvider) loadDataForBlsKey(blsKey []byte) error { return nil } -func (sdp *stakingDataProvider) getBlsKeyOwner(blsKey []byte) (string, error) { +// GetOwnersData returns all owner stats +func (sdp *stakingDataProvider) GetOwnersData() map[string]*epochStart.OwnerData { + sdp.mutStakingData.RLock() + defer sdp.mutStakingData.RUnlock() + + ret := make(map[string]*epochStart.OwnerData) + for owner, ownerData := range sdp.cache { + ret[owner] = &epochStart.OwnerData{ + NumActiveNodes: ownerData.numActiveNodes, + NumStakedNodes: ownerData.numStakedNodes, + TotalTopUp: big.NewInt(0).SetBytes(ownerData.totalTopUp.Bytes()), + TopUpPerNode: big.NewInt(0).SetBytes(ownerData.topUpPerNode.Bytes()), + AuctionList: make([]state.ValidatorInfoHandler, len(ownerData.auctionList)), + Qualified: ownerData.qualified, + } + copy(ret[owner].AuctionList, ownerData.auctionList) + } + + return ret +} + +// GetBlsKeyOwner returns the owner's public key of the provided bls key +func (sdp *stakingDataProvider) GetBlsKeyOwner(blsKey []byte) (string, error) { vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ CallerAddr: vm.ValidatorSCAddress, @@ -221,48 +283,109 @@ func (sdp 
*stakingDataProvider) getBlsKeyOwner(blsKey []byte) (string, error) { return string(data[0]), nil } -func (sdp *stakingDataProvider) getValidatorData(validatorAddress string) (*ownerStats, error) { - ownerData, exists := sdp.cache[validatorAddress] +func (sdp *stakingDataProvider) fillOwnerData(owner string, validator state.ValidatorInfoHandler) (*ownerStats, error) { + var err error + ownerData, exists := sdp.cache[owner] if exists { - return ownerData, nil + updateOwnerData(ownerData, validator) + } else { + ownerData, err = sdp.getAndFillOwnerDataFromSC(owner, validator) + if err != nil { + return nil, err + } + sdp.cache[owner] = ownerData } - return sdp.getValidatorDataFromStakingSC(validatorAddress) + return ownerData, nil } -func (sdp *stakingDataProvider) getValidatorDataFromStakingSC(validatorAddress string) (*ownerStats, error) { - topUpValue, totalStakedValue, numStakedWaiting, blsKeys, err := sdp.getValidatorInfoFromSC(validatorAddress) +func updateOwnerData(ownerData *ownerStats, validator state.ValidatorInfoHandler) { + if isInAuction(validator) { + ownerData.numActiveNodes-- + ownerData.auctionList = append(ownerData.auctionList, validator.ShallowClone()) + } +} + +func (sdp *stakingDataProvider) getAndFillOwnerDataFromSC(owner string, validator state.ValidatorInfoHandler) (*ownerStats, error) { + ownerInfo, err := sdp.getOwnerInfoFromSC(owner) if err != nil { return nil, err } - ownerData := &ownerStats{ - numEligible: 0, - numStakedNodes: numStakedWaiting.Int64(), - topUpValue: topUpValue, - totalStaked: totalStakedValue, - eligibleBaseStake: big.NewInt(0).Set(sdp.minNodePrice), - eligibleTopUpStake: big.NewInt(0), - topUpPerNode: big.NewInt(0), + topUpPerNode := big.NewInt(0) + numStakedNodes := ownerInfo.numStakedWaiting.Int64() + if numStakedNodes == 0 { + log.Debug("stakingDataProvider.fillOwnerData", + "message", epochStart.ErrOwnerHasNoStakedNode, + "owner", hex.EncodeToString([]byte(owner)), + "validator", hex.EncodeToString(validator.GetPublicKey()), + ) + } else { + topUpPerNode = big.NewInt(0).Div(ownerInfo.topUpValue, ownerInfo.numStakedWaiting) } - ownerData.blsKeys = make([][]byte, len(blsKeys)) - copy(ownerData.blsKeys, blsKeys) + ownerData := &ownerStats{ + numEligible: 0, + numStakedNodes: numStakedNodes, + numActiveNodes: numStakedNodes, + totalTopUp: ownerInfo.topUpValue, + topUpPerNode: topUpPerNode, + totalStaked: ownerInfo.totalStakedValue, + eligibleBaseStake: big.NewInt(0).Set(sdp.minNodePrice), + eligibleTopUpStake: big.NewInt(0), + eligibleTopUpPerNode: big.NewInt(0), + qualified: true, + } + err = sdp.checkAndFillOwnerValidatorAuctionData([]byte(owner), ownerData, validator) + if err != nil { + return nil, err + } - sdp.cache[validatorAddress] = ownerData + ownerData.blsKeys = make([][]byte, len(ownerInfo.blsKeys)) + copy(ownerData.blsKeys, ownerInfo.blsKeys) return ownerData, nil } -func (sdp *stakingDataProvider) getValidatorInfoFromSC(validatorAddress string) (*big.Int, *big.Int, *big.Int, [][]byte, error) { - validatorAddressBytes := []byte(validatorAddress) +func (sdp *stakingDataProvider) checkAndFillOwnerValidatorAuctionData( + ownerPubKey []byte, + ownerData *ownerStats, + validator state.ValidatorInfoHandler, +) error { + validatorInAuction := isInAuction(validator) + if !validatorInAuction { + return nil + } + if ownerData.numStakedNodes == 0 { + return fmt.Errorf("stakingDataProvider.checkAndFillOwnerValidatorAuctionData for validator in auction error: %w, owner: %s, node: %s", + epochStart.ErrOwnerHasNoStakedNode, + 
hex.EncodeToString(ownerPubKey), + hex.EncodeToString(validator.GetPublicKey()), + ) + } + if !sdp.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return fmt.Errorf("stakingDataProvider.checkAndFillOwnerValidatorAuctionData for validator in auction error: %w, owner: %s, node: %s", + epochStart.ErrReceivedAuctionValidatorsBeforeStakingV4, + hex.EncodeToString(ownerPubKey), + hex.EncodeToString(validator.GetPublicKey()), + ) + } + + ownerData.numActiveNodes -= 1 + ownerData.auctionList = []state.ValidatorInfoHandler{validator} + + return nil +} + +func (sdp *stakingDataProvider) getOwnerInfoFromSC(owner string) (*ownerInfoSC, error) { + ownerAddressBytes := []byte(owner) vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ CallerAddr: vm.EndOfEpochAddress, CallValue: big.NewInt(0), GasProvided: math.MaxInt64, - Arguments: [][]byte{validatorAddressBytes}, + Arguments: [][]byte{ownerAddressBytes}, }, RecipientAddr: vm.ValidatorSCAddress, Function: "getTotalStakedTopUpStakedBlsKeys", @@ -270,41 +393,50 @@ func (sdp *stakingDataProvider) getValidatorInfoFromSC(validatorAddress string) vmOutput, err := sdp.systemVM.RunSmartContractCall(vmInput) if err != nil { - return nil, nil, nil, nil, err + return nil, err } if vmOutput.ReturnCode != vmcommon.Ok { - return nil, nil, nil, nil, fmt.Errorf("%w, error: %v message: %s", epochStart.ErrExecutingSystemScCode, vmOutput.ReturnCode, vmOutput.ReturnMessage) + return nil, fmt.Errorf("%w, error: %v message: %s", epochStart.ErrExecutingSystemScCode, vmOutput.ReturnCode, vmOutput.ReturnMessage) } if len(vmOutput.ReturnData) < 3 { - return nil, nil, nil, nil, fmt.Errorf("%w, getTotalStakedTopUpStakedBlsKeys function should have at least three values", epochStart.ErrExecutingSystemScCode) + return nil, fmt.Errorf("%w, getTotalStakedTopUpStakedBlsKeys function should have at least three values", epochStart.ErrExecutingSystemScCode) } topUpValue := big.NewInt(0).SetBytes(vmOutput.ReturnData[0]) totalStakedValue := big.NewInt(0).SetBytes(vmOutput.ReturnData[1]) numStakedWaiting := big.NewInt(0).SetBytes(vmOutput.ReturnData[2]) - return topUpValue, totalStakedValue, numStakedWaiting, vmOutput.ReturnData[3:], nil + return &ownerInfoSC{ + topUpValue: topUpValue, + totalStakedValue: totalStakedValue, + numStakedWaiting: numStakedWaiting, + blsKeys: vmOutput.ReturnData[3:], + }, nil } // ComputeUnQualifiedNodes will compute which nodes are not qualified - do not have enough tokens to be validators -func (sdp *stakingDataProvider) ComputeUnQualifiedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) { +func (sdp *stakingDataProvider) ComputeUnQualifiedNodes(validatorsInfo state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { sdp.mutStakingData.Lock() defer sdp.mutStakingData.Unlock() mapOwnersKeys := make(map[string][][]byte) keysToUnStake := make([][]byte, 0) - mapBLSKeyStatus := createMapBLSKeyStatus(validatorInfos) + mapBLSKeyStatus, err := sdp.createMapBLSKeyStatus(validatorsInfo) + if err != nil { + return nil, nil, err + } + for ownerAddress, stakingInfo := range sdp.cache { maxQualified := big.NewInt(0).Div(stakingInfo.totalStaked, sdp.minNodePrice) if maxQualified.Int64() >= stakingInfo.numStakedNodes { continue } - sortedKeys := arrangeBlsKeysByStatus(mapBLSKeyStatus, stakingInfo.blsKeys) + sortedKeys := sdp.arrangeBlsKeysByStatus(mapBLSKeyStatus, stakingInfo.blsKeys) numKeysToUnStake := stakingInfo.numStakedNodes - maxQualified.Int64() - selectedKeys := 
selectKeysToUnStake(sortedKeys, numKeysToUnStake) + selectedKeys, numRemovedValidators := sdp.selectKeysToUnStake(sortedKeys, numKeysToUnStake) if len(selectedKeys) == 0 { continue } @@ -313,31 +445,44 @@ func (sdp *stakingDataProvider) ComputeUnQualifiedNodes(validatorInfos map[uint3 mapOwnersKeys[ownerAddress] = make([][]byte, len(selectedKeys)) copy(mapOwnersKeys[ownerAddress], selectedKeys) + + stakingInfo.qualified = false + sdp.numOfValidatorsInCurrEpoch -= uint32(numRemovedValidators) } return keysToUnStake, mapOwnersKeys, nil } -func createMapBLSKeyStatus(validatorInfos map[uint32][]*state.ValidatorInfo) map[string]string { +func (sdp *stakingDataProvider) createMapBLSKeyStatus(validatorsInfo state.ShardValidatorsInfoMapHandler) (map[string]string, error) { mapBLSKeyStatus := make(map[string]string) - for _, validatorsInfoSlice := range validatorInfos { - for _, validatorInfo := range validatorsInfoSlice { - mapBLSKeyStatus[string(validatorInfo.PublicKey)] = validatorInfo.List + for _, validator := range validatorsInfo.GetAllValidatorsInfo() { + list := validator.GetList() + pubKey := validator.GetPublicKey() + + if sdp.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) && list == string(common.NewList) { + return nil, fmt.Errorf("%w, bls key = %s", + epochStart.ErrReceivedNewListNodeInStakingV4, + hex.EncodeToString(pubKey), + ) } + + mapBLSKeyStatus[string(pubKey)] = list } - return mapBLSKeyStatus + return mapBLSKeyStatus, nil } -func selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) [][]byte { +func (sdp *stakingDataProvider) selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) ([][]byte, int) { selectedKeys := make([][]byte, 0) - newKeys := sortedKeys[string(common.NewList)] + newNodesList := sdp.getNewNodesList() + + newKeys := sortedKeys[newNodesList] if len(newKeys) > 0 { selectedKeys = append(selectedKeys, newKeys...) 
} if int64(len(selectedKeys)) >= numToSelect { - return selectedKeys[:numToSelect] + return selectedKeys[:numToSelect], 0 } waitingKeys := sortedKeys[string(common.WaitingList)] @@ -346,7 +491,9 @@ func selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) [][] } if int64(len(selectedKeys)) >= numToSelect { - return selectedKeys[:numToSelect] + overFlowKeys := len(selectedKeys) - int(numToSelect) + removedWaiting := len(waitingKeys) - overFlowKeys + return selectedKeys[:numToSelect], removedWaiting } eligibleKeys := sortedKeys[string(common.EligibleList)] @@ -355,18 +502,22 @@ func selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) [][] } if int64(len(selectedKeys)) >= numToSelect { - return selectedKeys[:numToSelect] + overFlowKeys := len(selectedKeys) - int(numToSelect) + removedEligible := len(eligibleKeys) - overFlowKeys + return selectedKeys[:numToSelect], removedEligible + len(waitingKeys) } - return selectedKeys + return selectedKeys, len(eligibleKeys) + len(waitingKeys) } -func arrangeBlsKeysByStatus(mapBlsKeyStatus map[string]string, blsKeys [][]byte) map[string][][]byte { +func (sdp *stakingDataProvider) arrangeBlsKeysByStatus(mapBlsKeyStatus map[string]string, blsKeys [][]byte) map[string][][]byte { sortedKeys := make(map[string][][]byte) + newNodesList := sdp.getNewNodesList() + for _, blsKey := range blsKeys { - blsKeyStatus, ok := mapBlsKeyStatus[string(blsKey)] - if !ok { - sortedKeys[string(common.NewList)] = append(sortedKeys[string(common.NewList)], blsKey) + blsKeyStatus, found := mapBlsKeyStatus[string(blsKey)] + if !found { + sortedKeys[newNodesList] = append(sortedKeys[newNodesList], blsKey) continue } @@ -376,6 +527,79 @@ func arrangeBlsKeysByStatus(mapBlsKeyStatus map[string]string, blsKeys [][]byte) return sortedKeys } +func (sdp *stakingDataProvider) getNewNodesList() string { + newNodesList := string(common.NewList) + if sdp.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + newNodesList = string(common.AuctionList) + } + + return newNodesList +} + +// GetNumOfValidatorsInCurrentEpoch returns the number of validators(eligible + waiting) in current epoch +func (sdp *stakingDataProvider) GetNumOfValidatorsInCurrentEpoch() uint32 { + sdp.mutStakingData.RLock() + defer sdp.mutStakingData.RUnlock() + + return sdp.numOfValidatorsInCurrEpoch +} + +func (sdp *stakingDataProvider) updateEpochStats(validator state.ValidatorInfoHandler) { + validatorCurrentList := common.PeerType(validator.GetList()) + shardID := validator.GetShardId() + + if validatorCurrentList == common.EligibleList { + sdp.validatorStatsInEpoch.Eligible[shardID]++ + return + } + + if validatorCurrentList == common.WaitingList { + sdp.validatorStatsInEpoch.Waiting[shardID]++ + return + } + + validatorPreviousList := common.PeerType(validator.GetPreviousList()) + if sdp.isValidatorLeaving(validatorCurrentList, validatorPreviousList) { + sdp.validatorStatsInEpoch.Leaving[shardID]++ + } +} + +func (sdp *stakingDataProvider) isValidatorLeaving(validatorCurrentList, validatorPreviousList common.PeerType) bool { + if validatorCurrentList != common.LeavingList { + return false + } + + // If no previous list is set, means that staking v4 is not activated or node is leaving right before activation + // and this node will be considered as eligible by the nodes coordinator with old code. 
+ // Otherwise, it will have it set, and we should check its previous list in the current epoch + return len(validatorPreviousList) == 0 || validatorPreviousList == common.EligibleList || validatorPreviousList == common.WaitingList +} + +// GetCurrentEpochValidatorStats returns the current epoch validator stats +func (sdp *stakingDataProvider) GetCurrentEpochValidatorStats() epochStart.ValidatorStatsInEpoch { + sdp.mutStakingData.RLock() + defer sdp.mutStakingData.RUnlock() + + return copyValidatorStatsInEpoch(sdp.validatorStatsInEpoch) +} + +func copyValidatorStatsInEpoch(oldInstance epochStart.ValidatorStatsInEpoch) epochStart.ValidatorStatsInEpoch { + return epochStart.ValidatorStatsInEpoch{ + Eligible: copyMap(oldInstance.Eligible), + Waiting: copyMap(oldInstance.Waiting), + Leaving: copyMap(oldInstance.Leaving), + } +} + +func copyMap(oldMap map[uint32]int) map[uint32]int { + newMap := make(map[uint32]int, len(oldMap)) + for key, value := range oldMap { + newMap[key] = value + } + + return newMap +} + // IsInterfaceNil return true if underlying object is nil func (sdp *stakingDataProvider) IsInterfaceNil() bool { return sdp == nil diff --git a/epochStart/metachain/stakingDataProvider_test.go b/epochStart/metachain/stakingDataProvider_test.go index 257485e8f0c..6f5ad62868d 100644 --- a/epochStart/metachain/stakingDataProvider_test.go +++ b/epochStart/metachain/stakingDataProvider_test.go @@ -7,6 +7,7 @@ import ( "fmt" "math/big" "strings" + "sync" "testing" "github.com/multiversx/mx-chain-core-go/core" @@ -17,28 +18,49 @@ import ( "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/vm" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestNewStakingDataProvider_NilSystemVMShouldErr(t *testing.T) { - t.Parallel() - - sdp, err := NewStakingDataProvider(nil, "100000") +const stakingV4Step1EnableEpoch = 444 +const stakingV4Step2EnableEpoch = 445 - assert.True(t, check.IfNil(sdp)) - assert.Equal(t, epochStart.ErrNilSystemVmInstance, err) +func createStakingDataProviderArgs() StakingDataProviderArgs { + return StakingDataProviderArgs{ + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + SystemVM: &mock.VMExecutionHandlerStub{}, + MinNodePrice: "2500", + } } -func TestNewStakingDataProvider_ShouldWork(t *testing.T) { +func TestNewStakingDataProvider_NilInputPointersShouldErr(t *testing.T) { t.Parallel() - sdp, err := NewStakingDataProvider(&mock.VMExecutionHandlerStub{}, "100000") + t.Run("nil system vm", func(t *testing.T) { + args := createStakingDataProviderArgs() + args.SystemVM = nil + sdp, err := NewStakingDataProvider(args) + assert.True(t, check.IfNil(sdp)) + assert.Equal(t, epochStart.ErrNilSystemVmInstance, err) + }) - assert.False(t, check.IfNil(sdp)) - assert.Nil(t, err) + t.Run("nil epoch notifier", func(t *testing.T) { + args := createStakingDataProviderArgs() + args.EnableEpochsHandler = nil + sdp, err := NewStakingDataProvider(args) + assert.True(t, check.IfNil(sdp)) + assert.Equal(t, vm.ErrNilEnableEpochsHandler, err) + }) + + t.Run("should work", func(t *testing.T) { + args := createStakingDataProviderArgs() + sdp, err 
:= NewStakingDataProvider(args) + assert.False(t, check.IfNil(sdp)) + assert.Nil(t, err) + }) } func TestStakingDataProvider_PrepareDataForBlsKeyGetBlsKeyOwnerErrorsShouldErr(t *testing.T) { @@ -46,7 +68,8 @@ func TestStakingDataProvider_PrepareDataForBlsKeyGetBlsKeyOwnerErrorsShouldErr(t numCall := 0 expectedErr := errors.New("expected error") - sdp, _ := NewStakingDataProvider(&mock.VMExecutionHandlerStub{ + args := createStakingDataProviderArgs() + args.SystemVM = &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { numCall++ if numCall == 1 { @@ -65,17 +88,18 @@ func TestStakingDataProvider_PrepareDataForBlsKeyGetBlsKeyOwnerErrorsShouldErr(t return nil, nil }, - }, "100000") + } + sdp, _ := NewStakingDataProvider(args) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Equal(t, expectedErr, err) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), vmcommon.UserError.String())) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), "returned exactly one value: the owner address")) @@ -87,7 +111,8 @@ func TestStakingDataProvider_PrepareDataForBlsKeyLoadOwnerDataErrorsShouldErr(t numCall := 0 owner := []byte("owner") expectedErr := errors.New("expected error") - sdp, _ := NewStakingDataProvider(&mock.VMExecutionHandlerStub{ + args := createStakingDataProviderArgs() + args.SystemVM = &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { if input.Function == "getOwner" { return &vmcommon.VMOutput{ @@ -111,17 +136,18 @@ func TestStakingDataProvider_PrepareDataForBlsKeyLoadOwnerDataErrorsShouldErr(t } return nil, nil }, - }, "100000") + } + sdp, _ := NewStakingDataProvider(args) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Equal(t, expectedErr, err) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), vmcommon.UserError.String())) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), "getTotalStakedTopUpStakedBlsKeys function should have at least three values")) @@ -138,12 +164,12 @@ func TestStakingDataProvider_PrepareDataForBlsKeyFromSCShouldWork(t *testing.T) sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Nil(t, err) assert.Equal(t, 2, 
numRunContractCalls) ownerData := sdp.GetFromCache(owner) require.NotNil(t, ownerData) - assert.Equal(t, topUpVal, ownerData.topUpValue) + assert.Equal(t, topUpVal, ownerData.totalTopUp) assert.Equal(t, 1, ownerData.numEligible) } @@ -158,16 +184,16 @@ func TestStakingDataProvider_PrepareDataForBlsKeyCachedResponseShouldWork(t *tes sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Nil(t, err) - err = sdp.loadDataForBlsKey([]byte("bls key2")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key2")}) assert.Nil(t, err) assert.Equal(t, 3, numRunContractCalls) ownerData := sdp.GetFromCache(owner) require.NotNil(t, ownerData) - assert.Equal(t, topUpVal, ownerData.topUpValue) + assert.Equal(t, topUpVal, ownerData.totalTopUp) assert.Equal(t, 2, ownerData.numEligible) } @@ -179,11 +205,11 @@ func TestStakingDataProvider_PrepareDataForBlsKeyWithRealSystemVmShouldWork(t *t blsKey := []byte("bls key") sdp := createStakingDataProviderWithRealArgs(t, owner, blsKey, topUpVal) - err := sdp.loadDataForBlsKey(blsKey) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: blsKey}) assert.Nil(t, err) ownerData := sdp.GetFromCache(owner) require.NotNil(t, ownerData) - assert.Equal(t, topUpVal, ownerData.topUpValue) + assert.Equal(t, topUpVal, ownerData.totalTopUp) assert.Equal(t, 1, ownerData.numEligible) } @@ -224,6 +250,39 @@ func TestStakingDataProvider_ComputeUnQualifiedNodes(t *testing.T) { require.Zero(t, len(ownersWithNotEnoughFunds)) } +func TestStakingDataProvider_ComputeUnQualifiedNodesWithStakingV4ReceivedNewListNode(t *testing.T) { + v0 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey0"), + List: string(common.EligibleList), + RewardAddress: []byte("address0"), + } + v1 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey1"), + List: string(common.NewList), + RewardAddress: []byte("address0"), + } + v2 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey2"), + List: string(common.AuctionList), + RewardAddress: []byte("address1"), + } + + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(v0) + _ = valInfo.Add(v1) + _ = valInfo.Add(v2) + + sdp := createStakingDataProviderAndUpdateCache(t, valInfo, big.NewInt(0)) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + keysToUnStake, ownersWithNotEnoughFunds, err := sdp.ComputeUnQualifiedNodes(valInfo) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), epochStart.ErrReceivedNewListNodeInStakingV4.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v1.PublicKey))) + require.Empty(t, keysToUnStake) + require.Empty(t, ownersWithNotEnoughFunds) +} + func TestStakingDataProvider_ComputeUnQualifiedNodesWithOwnerNotEnoughFunds(t *testing.T) { nbShards := uint32(3) nbEligible := make(map[uint32]uint32) @@ -259,6 +318,39 @@ func TestStakingDataProvider_ComputeUnQualifiedNodesWithOwnerNotEnoughFunds(t *t require.Equal(t, 1, len(ownersWithNotEnoughFunds)) } +func TestStakingDataProvider_ComputeUnQualifiedNodesWithOwnerNotEnoughFundsWithStakingV4(t *testing.T) { + owner := "address0" + v0 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey0"), + List: string(common.EligibleList), + RewardAddress: []byte(owner), + } + v1 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey1"), + List: string(common.AuctionList), + RewardAddress: 
[]byte(owner), + } + + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(v0) + _ = valInfo.Add(v1) + + sdp := createStakingDataProviderAndUpdateCache(t, valInfo, big.NewInt(0)) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sdp.cache[owner].blsKeys = append(sdp.cache[owner].blsKeys, []byte("newKey")) + sdp.cache[owner].totalStaked = big.NewInt(2500) + sdp.cache[owner].numStakedNodes++ + + keysToUnStake, ownersWithNotEnoughFunds, err := sdp.ComputeUnQualifiedNodes(valInfo) + require.Nil(t, err) + + expectedUnStakedKeys := [][]byte{[]byte("blsKey1"), []byte("newKey")} + expectedOwnerWithNotEnoughFunds := map[string][][]byte{owner: expectedUnStakedKeys} + require.Equal(t, expectedUnStakedKeys, keysToUnStake) + require.Equal(t, expectedOwnerWithNotEnoughFunds, ownersWithNotEnoughFunds) +} + func TestStakingDataProvider_GetTotalStakeEligibleNodes(t *testing.T) { t.Parallel() @@ -345,13 +437,13 @@ func TestStakingDataProvider_GetNodeStakedTopUpShouldWork(t *testing.T) { sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) expectedOwnerStats := &ownerStats{ - topUpPerNode: big.NewInt(37), + eligibleTopUpPerNode: big.NewInt(37), } sdp.SetInCache(owner, expectedOwnerStats) res, err := sdp.GetNodeStakedTopUp(owner) require.NoError(t, err) - require.Equal(t, expectedOwnerStats.topUpPerNode, res) + require.Equal(t, expectedOwnerStats.eligibleTopUpPerNode, res) } func TestStakingDataProvider_PrepareStakingDataForRewards(t *testing.T) { @@ -365,25 +457,201 @@ func TestStakingDataProvider_PrepareStakingDataForRewards(t *testing.T) { sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - keys := make(map[uint32][][]byte) - keys[0] = append(keys[0], []byte("owner")) - err := sdp.PrepareStakingDataForRewards(keys) + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{PublicKey: owner, ShardId: 0}) + err := sdp.PrepareStakingData(validatorsMap) require.NoError(t, err) } func TestStakingDataProvider_FillValidatorInfo(t *testing.T) { t.Parallel() - owner := []byte("owner") - topUpVal := big.NewInt(828743) - basePrice := big.NewInt(100000) - stakeVal := big.NewInt(0).Add(topUpVal, basePrice) - numRunContractCalls := 0 + t.Run("should work", func(t *testing.T) { + t.Parallel() - sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) + owner := []byte("owner") + topUpVal := big.NewInt(828743) + basePrice := big.NewInt(100000) + stakeVal := big.NewInt(0).Add(topUpVal, basePrice) + numRunContractCalls := 0 - err := sdp.FillValidatorInfo([]byte("owner")) - require.NoError(t, err) + sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) + + err := sdp.FillValidatorInfo(&state.ValidatorInfo{PublicKey: []byte("bls key")}) + require.NoError(t, err) + }) + t.Run("concurrent calls should work", func(t *testing.T) { + t.Parallel() + + owner := []byte("owner") + topUpVal := big.NewInt(828743) + basePrice := big.NewInt(100000) + stakeVal := big.NewInt(0).Add(topUpVal, basePrice) + numRunContractCalls := 0 + + sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) + + wg := sync.WaitGroup{} + numCalls := 100 + wg.Add(numCalls) + + require.NotPanics(t, func() { + for i := 0; i < numCalls; i++ { + go func(idx int) { + switch idx % 2 { + case 0: + err := sdp.FillValidatorInfo(&state.ValidatorInfo{ + 
PublicKey: []byte("bls key"), + List: string(common.EligibleList), + ShardId: 0, + }) + require.NoError(t, err) + case 1: + stats := sdp.GetCurrentEpochValidatorStats() + log.Info(fmt.Sprintf("%d", stats.Eligible[0])) + } + + wg.Done() + }(i) + } + + wg.Wait() + }) + }) +} + +func TestCheckAndFillOwnerValidatorAuctionData(t *testing.T) { + t.Parallel() + + t.Run("validator not in auction, expect no error, no owner data update", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + + ownerData := &ownerStats{} + err := sdp.checkAndFillOwnerValidatorAuctionData([]byte("owner"), ownerData, &state.ValidatorInfo{List: string(common.NewList)}) + require.Nil(t, err) + require.Equal(t, &ownerStats{}, ownerData) + }) + + t.Run("validator in auction, but no staked node, expect error", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + + owner := []byte("owner") + ownerData := &ownerStats{numStakedNodes: 0} + validator := &state.ValidatorInfo{PublicKey: []byte("validatorPubKey"), List: string(common.AuctionList)} + + err := sdp.checkAndFillOwnerValidatorAuctionData(owner, ownerData, validator) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), epochStart.ErrOwnerHasNoStakedNode.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(owner))) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(validator.PublicKey))) + require.Equal(t, &ownerStats{numStakedNodes: 0}, ownerData) + }) + + t.Run("validator in auction, staking v4 not enabled yet, expect error", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + + owner := []byte("owner") + ownerData := &ownerStats{numStakedNodes: 1} + validator := &state.ValidatorInfo{PublicKey: []byte("validatorPubKey"), List: string(common.AuctionList)} + + err := sdp.checkAndFillOwnerValidatorAuctionData(owner, ownerData, validator) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), epochStart.ErrReceivedAuctionValidatorsBeforeStakingV4.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(owner))) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(validator.PublicKey))) + require.Equal(t, &ownerStats{numStakedNodes: 1}, ownerData) + }) + + t.Run("should update owner's data", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4StartedFlag) + + owner := []byte("owner") + ownerData := &ownerStats{numStakedNodes: 3, numActiveNodes: 3} + validator := &state.ValidatorInfo{PublicKey: []byte("validatorPubKey"), List: string(common.AuctionList)} + + err := sdp.checkAndFillOwnerValidatorAuctionData(owner, ownerData, validator) + require.Nil(t, err) + require.Equal(t, &ownerStats{ + numStakedNodes: 3, + numActiveNodes: 2, + auctionList: []state.ValidatorInfoHandler{validator}, + }, ownerData) + }) +} + +func TestSelectKeysToUnStake(t *testing.T) { + t.Parallel() + + t.Run("no validator removed", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sortedKeys := map[string][][]byte{ + string(common.AuctionList): {[]byte("pk0")}, + 
} + unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 2) + require.Equal(t, [][]byte{[]byte("pk0")}, unStakedKeys) + require.Equal(t, 0, removedValidators) + }) + + t.Run("overflow from waiting", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sortedKeys := map[string][][]byte{ + string(common.AuctionList): {[]byte("pk0")}, + string(common.EligibleList): {[]byte("pk2")}, + string(common.WaitingList): {[]byte("pk3"), []byte("pk4"), []byte("pk5")}, + } + unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 2) + require.Equal(t, [][]byte{[]byte("pk0"), []byte("pk3")}, unStakedKeys) + require.Equal(t, 1, removedValidators) + }) + + t.Run("overflow from eligible", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sortedKeys := map[string][][]byte{ + string(common.AuctionList): {[]byte("pk0")}, + string(common.EligibleList): {[]byte("pk1"), []byte("pk2")}, + string(common.WaitingList): {[]byte("pk4"), []byte("pk5")}, + } + unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 4) + require.Equal(t, [][]byte{[]byte("pk0"), []byte("pk4"), []byte("pk5"), []byte("pk1")}, unStakedKeys) + require.Equal(t, 3, removedValidators) + }) + + t.Run("no overflow", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sortedKeys := map[string][][]byte{ + string(common.AuctionList): {[]byte("pk0")}, + string(common.EligibleList): {[]byte("pk1")}, + string(common.WaitingList): {[]byte("pk2")}, + } + unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 3) + require.Equal(t, [][]byte{[]byte("pk0"), []byte("pk2"), []byte("pk1")}, unStakedKeys) + require.Equal(t, 2, removedValidators) + }) } func createStakingDataProviderWithMockArgs( @@ -393,7 +661,8 @@ func createStakingDataProviderWithMockArgs( stakingVal *big.Int, numRunContractCalls *int, ) *stakingDataProvider { - sdp, err := NewStakingDataProvider(&mock.VMExecutionHandlerStub{ + args := createStakingDataProviderArgs() + args.SystemVM = &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { *numRunContractCalls++ switch input.Function { @@ -417,7 +686,8 @@ func createStakingDataProviderWithMockArgs( return nil, errors.New("unexpected call") }, - }, "100000") + } + sdp, err := NewStakingDataProvider(args) require.Nil(t, err) return sdp @@ -435,7 +705,9 @@ func createStakingDataProviderWithRealArgs(t *testing.T, owner []byte, blsKey [] doStake(t, s.systemVM, s.userAccountsDB, owner, big.NewInt(0).Add(big.NewInt(1000), topUpVal), blsKey) - sdp, _ := NewStakingDataProvider(s.systemVM, "100000") + argsStakingDataProvider := createStakingDataProviderArgs() + argsStakingDataProvider.SystemVM = s.systemVM + sdp, _ := NewStakingDataProvider(argsStakingDataProvider) return sdp } @@ -464,27 +736,28 @@ func saveOutputAccounts(t *testing.T, accountsDB state.AccountsAdapter, vmOutput require.Nil(t, err) } -func createStakingDataProviderAndUpdateCache(t *testing.T, validatorsInfo 
map[uint32][]*state.ValidatorInfo, topUpValue *big.Int) *stakingDataProvider { - +func createStakingDataProviderAndUpdateCache(t *testing.T, validatorsInfo state.ShardValidatorsInfoMapHandler, topUpValue *big.Int) *stakingDataProvider { args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1, }, testscommon.CreateMemUnit()) args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, }) - sdp, _ := NewStakingDataProvider(args.SystemVM, "2500") + + argsStakingDataProvider := createStakingDataProviderArgs() + argsStakingDataProvider.SystemVM = args.SystemVM + sdp, _ := NewStakingDataProvider(argsStakingDataProvider) args.StakingDataProvider = sdp s, _ := NewSystemSCProcessor(args) require.NotNil(t, s) - for _, valsList := range validatorsInfo { - for _, valInfo := range valsList { - stake := big.NewInt(0).Add(big.NewInt(2500), topUpValue) - if valInfo.List != string(common.LeavingList) && valInfo.List != string(common.InactiveList) { - doStake(t, s.systemVM, s.userAccountsDB, valInfo.RewardAddress, stake, valInfo.PublicKey) - } - updateCache(sdp, valInfo.RewardAddress, valInfo.PublicKey, valInfo.List, stake) + for _, valInfo := range validatorsInfo.GetAllValidatorsInfo() { + stake := big.NewInt(0).Add(big.NewInt(2500), topUpValue) + if valInfo.GetList() != string(common.LeavingList) && valInfo.GetList() != string(common.InactiveList) { + doStake(t, s.systemVM, s.userAccountsDB, valInfo.GetRewardAddress(), stake, valInfo.GetPublicKey()) } + updateCache(sdp, valInfo.GetRewardAddress(), valInfo.GetPublicKey(), valInfo.GetList(), stake) + } return sdp @@ -495,14 +768,14 @@ func updateCache(sdp *stakingDataProvider, ownerAddress []byte, blsKey []byte, l if owner == nil { owner = &ownerStats{ - numEligible: 0, - numStakedNodes: 0, - topUpValue: big.NewInt(0), - totalStaked: big.NewInt(0), - eligibleBaseStake: big.NewInt(0), - eligibleTopUpStake: big.NewInt(0), - topUpPerNode: big.NewInt(0), - blsKeys: nil, + numEligible: 0, + numStakedNodes: 0, + totalTopUp: big.NewInt(0), + totalStaked: big.NewInt(0), + eligibleBaseStake: big.NewInt(0), + eligibleTopUpStake: big.NewInt(0), + eligibleTopUpPerNode: big.NewInt(0), + blsKeys: nil, } } @@ -518,12 +791,12 @@ func updateCache(sdp *stakingDataProvider, ownerAddress []byte, blsKey []byte, l sdp.cache[string(ownerAddress)] = owner } -func createValidatorsInfo(nbShards uint32, nbEligible, nbWaiting, nbLeaving, nbInactive map[uint32]uint32) map[uint32][]*state.ValidatorInfo { - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) +func createValidatorsInfo(nbShards uint32, nbEligible, nbWaiting, nbLeaving, nbInactive map[uint32]uint32) state.ShardValidatorsInfoMapHandler { + validatorsInfo := state.NewShardValidatorsInfoMap() shardMap := shardsMap(nbShards) for shardID := range shardMap { - valInfoList := make([]*state.ValidatorInfo, 0) + valInfoList := make([]state.ValidatorInfoHandler, 0) for eligible := uint32(0); eligible < nbEligible[shardID]; eligible++ { vInfo := &state.ValidatorInfo{ PublicKey: []byte(fmt.Sprintf("blsKey%s%d%d", common.EligibleList, shardID, eligible)), @@ -561,7 +834,7 @@ func createValidatorsInfo(nbShards uint32, nbEligible, nbWaiting, nbLeaving, nbI } valInfoList = append(valInfoList, vInfo) } - validatorsInfo[shardID] = valInfoList + _ = validatorsInfo.SetValidatorsInShard(shardID, valInfoList) } return validatorsInfo } diff --git a/epochStart/metachain/systemSCs.go b/epochStart/metachain/systemSCs.go index 39bfa4c2e41..96cba60251b 100644 --- 
a/epochStart/metachain/systemSCs.go +++ b/epochStart/metachain/systemSCs.go @@ -1,30 +1,20 @@ package metachain import ( - "bytes" - "context" "fmt" - "math" "math/big" - "sort" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/errChan" - vInfo "github.com/multiversx/mx-chain-go/common/validatorInfo" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/vm" - "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -41,107 +31,41 @@ type ArgsNewEpochStartSystemSCProcessing struct { EndOfEpochCallerAddress []byte StakingSCAddress []byte - MaxNodesEnableConfig []config.MaxNodesChangeConfig ESDTOwnerAddressBytes []byte - GenesisNodesConfig sharding.GenesisNodesSetupHandler - EpochNotifier process.EpochNotifier - NodesConfigProvider epochStart.NodesConfigProvider - StakingDataProvider epochStart.StakingDataProvider - EnableEpochsHandler common.EnableEpochsHandler + GenesisNodesConfig sharding.GenesisNodesSetupHandler + EpochNotifier process.EpochNotifier + NodesConfigProvider epochStart.NodesConfigProvider + StakingDataProvider epochStart.StakingDataProvider + AuctionListSelector epochStart.AuctionListSelector + MaxNodesChangeConfigProvider epochStart.MaxNodesChangeConfigProvider + EnableEpochsHandler common.EnableEpochsHandler } type systemSCProcessor struct { - systemVM vmcommon.VMExecutionHandler - userAccountsDB state.AccountsAdapter - marshalizer marshal.Marshalizer - peerAccountsDB state.AccountsAdapter - chanceComputer nodesCoordinator.ChanceComputer - shardCoordinator sharding.Coordinator - startRating uint32 - validatorInfoCreator epochStart.ValidatorInfoCreator - genesisNodesConfig sharding.GenesisNodesSetupHandler - nodesConfigProvider epochStart.NodesConfigProvider - stakingDataProvider epochStart.StakingDataProvider - endOfEpochCallerAddress []byte - stakingSCAddress []byte - maxNodesEnableConfig []config.MaxNodesChangeConfig - maxNodes uint32 - flagChangeMaxNodesEnabled atomic.Flag - esdtOwnerAddressBytes []byte - mapNumSwitchedPerShard map[uint32]uint32 - mapNumSwitchablePerShard map[uint32]uint32 - enableEpochsHandler common.EnableEpochsHandler -} - -type validatorList []*state.ValidatorInfo - -// Len will return the length of the validatorList -func (v validatorList) Len() int { return len(v) } - -// Swap will interchange the objects on input indexes -func (v validatorList) Swap(i, j int) { v[i], v[j] = v[j], v[i] } - -// Less will return true if object on index i should appear before object in index j -// Sorting of validators should be by index and public key -func (v validatorList) Less(i, j int) bool { - if 
v[i].TempRating == v[j].TempRating { - if v[i].Index == v[j].Index { - return bytes.Compare(v[i].PublicKey, v[j].PublicKey) < 0 - } - return v[i].Index < v[j].Index - } - return v[i].TempRating < v[j].TempRating + *legacySystemSCProcessor + auctionListSelector epochStart.AuctionListSelector + enableEpochsHandler common.EnableEpochsHandler } // NewSystemSCProcessor creates the end of epoch system smart contract processor func NewSystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*systemSCProcessor, error) { - if check.IfNilReflect(args.SystemVM) { - return nil, epochStart.ErrNilSystemVM - } - if check.IfNil(args.UserAccountsDB) { - return nil, epochStart.ErrNilAccountsDB - } - if check.IfNil(args.PeerAccountsDB) { - return nil, epochStart.ErrNilAccountsDB - } - if check.IfNil(args.Marshalizer) { - return nil, epochStart.ErrNilMarshalizer - } - if check.IfNil(args.ValidatorInfoCreator) { - return nil, epochStart.ErrNilValidatorInfoProcessor - } - if len(args.EndOfEpochCallerAddress) == 0 { - return nil, epochStart.ErrNilEndOfEpochCallerAddress - } - if len(args.StakingSCAddress) == 0 { - return nil, epochStart.ErrNilStakingSCAddress - } - if check.IfNil(args.ChanceComputer) { - return nil, epochStart.ErrNilChanceComputer - } if check.IfNil(args.EpochNotifier) { return nil, epochStart.ErrNilEpochStartNotifier } - if check.IfNil(args.GenesisNodesConfig) { - return nil, epochStart.ErrNilGenesisNodesConfig - } - if check.IfNil(args.NodesConfigProvider) { - return nil, epochStart.ErrNilNodesConfigProvider + if check.IfNil(args.AuctionListSelector) { + return nil, epochStart.ErrNilAuctionListSelector } - if check.IfNil(args.StakingDataProvider) { - return nil, epochStart.ErrNilStakingDataProvider - } - if check.IfNil(args.ShardCoordinator) { - return nil, epochStart.ErrNilShardCoordinator - } - if len(args.ESDTOwnerAddressBytes) == 0 { - return nil, epochStart.ErrEmptyESDTOwnerAddress + + legacy, err := newLegacySystemSCProcessor(args) + if err != nil { + return nil, err } if check.IfNil(args.EnableEpochsHandler) { return nil, epochStart.ErrNilEnableEpochsHandler } - err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + + err = core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly, common.StakingV2OwnerFlagInSpecificEpochOnly, common.CorrectLastUnJailedFlagInSpecificEpochOnly, @@ -152,133 +76,90 @@ func NewSystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*systemSCPr common.ESDTFlagInSpecificEpochOnly, common.GovernanceFlag, common.SaveJailedAlwaysFlag, + common.StakingV4Step1Flag, + common.StakingV4Step2Flag, + common.StakingV4StartedFlag, + common.DelegationSmartContractFlagInSpecificEpochOnly, + common.GovernanceFlagInSpecificEpochOnly, }) if err != nil { return nil, err } s := &systemSCProcessor{ - systemVM: args.SystemVM, - userAccountsDB: args.UserAccountsDB, - peerAccountsDB: args.PeerAccountsDB, - marshalizer: args.Marshalizer, - startRating: args.StartRating, - validatorInfoCreator: args.ValidatorInfoCreator, - genesisNodesConfig: args.GenesisNodesConfig, - endOfEpochCallerAddress: args.EndOfEpochCallerAddress, - stakingSCAddress: args.StakingSCAddress, - chanceComputer: args.ChanceComputer, - mapNumSwitchedPerShard: make(map[uint32]uint32), - mapNumSwitchablePerShard: make(map[uint32]uint32), - stakingDataProvider: args.StakingDataProvider, - nodesConfigProvider: args.NodesConfigProvider, - shardCoordinator: args.ShardCoordinator, - 
esdtOwnerAddressBytes: args.ESDTOwnerAddressBytes, - enableEpochsHandler: args.EnableEpochsHandler, + legacySystemSCProcessor: legacy, + auctionListSelector: args.AuctionListSelector, + enableEpochsHandler: args.EnableEpochsHandler, } - s.maxNodesEnableConfig = make([]config.MaxNodesChangeConfig, len(args.MaxNodesEnableConfig)) - copy(s.maxNodesEnableConfig, args.MaxNodesEnableConfig) - sort.Slice(s.maxNodesEnableConfig, func(i, j int) bool { - return s.maxNodesEnableConfig[i].EpochEnable < s.maxNodesEnableConfig[j].EpochEnable - }) - args.EpochNotifier.RegisterNotifyHandler(s) return s, nil } // ProcessSystemSmartContract does all the processing at end of epoch in case of system smart contract func (s *systemSCProcessor) ProcessSystemSmartContract( - validatorInfos map[uint32][]*state.ValidatorInfo, - nonce uint64, - epoch uint32, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, ) error { - if s.enableEpochsHandler.IsFlagEnabled(common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly) { - err := s.updateSystemSCConfigMinNodes() - if err != nil { - return err - } + err := checkNilInputValues(validatorsInfoMap, header) + if err != nil { + return err } - if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2OwnerFlagInSpecificEpochOnly) { - err := s.updateOwnersForBlsKeys() - if err != nil { - return err - } + err = s.processLegacy(validatorsInfoMap, header.GetNonce(), header.GetEpoch()) + if err != nil { + return err } + return s.processWithNewFlags(validatorsInfoMap, header) +} - if s.flagChangeMaxNodesEnabled.IsSet() { - err := s.updateMaxNodes(validatorInfos, nonce) - if err != nil { - return err - } +func checkNilInputValues(validatorsInfoMap state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { + if check.IfNil(header) { + return process.ErrNilHeaderHandler } - - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlagInSpecificEpochOnly) { - err := s.resetLastUnJailed() - if err != nil { - return err - } + if validatorsInfoMap == nil { + return fmt.Errorf("systemSCProcessor.ProcessSystemSmartContract : %w, header nonce: %d ", + errNilValidatorsInfoMap, header.GetNonce()) } - if s.enableEpochsHandler.IsFlagEnabled(common.DelegationSmartContractFlag) { - err := s.initDelegationSystemSC() - if err != nil { - return err - } - } + return nil +} - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - err := s.cleanAdditionalQueue() +func (s *systemSCProcessor) processWithNewFlags( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, +) error { + if s.enableEpochsHandler.IsFlagEnabled(common.GovernanceFlagInSpecificEpochOnly) { + err := s.updateToGovernanceV2() if err != nil { return err } } - if s.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { - err := s.computeNumWaitingPerShard(validatorInfos) - if err != nil { - return err - } - - err = s.swapJailedWithWaiting(validatorInfos) + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + err := s.unStakeAllNodesFromQueue() if err != nil { return err } } - if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - err := s.prepareRewardsData(validatorInfos) - if err != nil { - return err - } - - err = s.fillStakingDataForNonEligible(validatorInfos) + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.prepareStakingDataForEligibleNodes(validatorsInfoMap) if err != nil { return err } - numUnStaked, err := s.unStakeNodesWithNotEnoughFunds(validatorInfos, 
epoch) + err = s.fillStakingDataForNonEligible(validatorsInfoMap) if err != nil { return err } - err = s.stakeNodesFromQueue(validatorInfos, numUnStaked, nonce) + err = s.unStakeNodesWithNotEnoughFundsWithStakingV4(validatorsInfoMap, header.GetEpoch()) if err != nil { return err } - } - - if s.enableEpochsHandler.IsFlagEnabled(common.ESDTFlagInSpecificEpochOnly) { - err := s.initESDT() - if err != nil { - //not a critical error - log.Error("error while initializing ESDT", "err", err) - } - } - if s.enableEpochsHandler.IsFlagEnabled(common.GovernanceFlag) { - err := s.updateToGovernanceV2() + err = s.auctionListSelector.SelectNodesFromAuctionList(validatorsInfoMap, header.GetPrevRandSeed()) if err != nil { return err } @@ -287,1162 +168,94 @@ func (s *systemSCProcessor) ProcessSystemSmartContract( return nil } -// ToggleUnStakeUnBond will pause/unPause the unStake/unBond functions on the validator system sc -func (s *systemSCProcessor) ToggleUnStakeUnBond(value bool) error { - if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - return nil - } - +func (s *systemSCProcessor) unStakeAllNodesFromQueue() error { vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: nil, + CallerAddr: vm.EndOfEpochAddress, CallValue: big.NewInt(0), + Arguments: [][]byte{}, }, - RecipientAddr: vm.ValidatorSCAddress, - Function: "unPauseUnStakeUnBond", - } - - if value { - vmInput.Function = "pauseUnStakeUnBond" + RecipientAddr: vm.StakingSCAddress, + Function: "unStakeAllNodesFromQueue", } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when unStaking all nodes from staking queue", errRun) } - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrSystemValidatorSCCall - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err + return fmt.Errorf("got return code %s when unStaking all nodes from staking queue", vmOutput.ReturnCode) } - return nil + return s.processSCOutputAccounts(vmOutput) } -func (s *systemSCProcessor) unStakeNodesWithNotEnoughFunds( - validatorInfos map[uint32][]*state.ValidatorInfo, +func (s *systemSCProcessor) unStakeNodesWithNotEnoughFundsWithStakingV4( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, epoch uint32, -) (uint32, error) { - nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorInfos) +) error { + nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorsInfoMap) if err != nil { - return 0, err + return err } - nodesUnStakedFromAdditionalQueue := uint32(0) - log.Debug("unStake nodes with not enough funds", "num", len(nodesToUnStake)) for _, blsKey := range nodesToUnStake { log.Debug("unStake at end of epoch for node", "blsKey", blsKey) err = s.unStakeOneNode(blsKey, epoch) - if err != nil { - return 0, err - } - - validatorInfo := getValidatorInfoWithBLSKey(validatorInfos, blsKey) - if validatorInfo == nil { - nodesUnStakedFromAdditionalQueue++ - log.Debug("unStaked node which was in additional queue", "blsKey", blsKey) - continue - } - - validatorInfo.List = string(common.LeavingList) - } - - err = s.updateDelegationContracts(mapOwnersKeys) - if err != nil { - return 0, err - } - - nodesToStakeFromQueue := uint32(len(nodesToUnStake)) - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - nodesToStakeFromQueue -= 
nodesUnStakedFromAdditionalQueue - } - - log.Debug("stake nodes from waiting list", "num", nodesToStakeFromQueue) - return nodesToStakeFromQueue, nil -} - -func (s *systemSCProcessor) unStakeOneNode(blsKey []byte, epoch uint32) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{blsKey}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "unStakeAtEndOfEpoch", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - if vmOutput.ReturnCode != vmcommon.Ok { - log.Debug("unStakeOneNode", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) - return epochStart.ErrUnStakeExecuteError - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - account, errExists := s.peerAccountsDB.GetExistingAccount(blsKey) - if errExists != nil { - return nil - } - - peerAccount, ok := account.(state.PeerAccountHandler) - if !ok { - return epochStart.ErrWrongTypeAssertion - } - - peerAccount.SetListAndIndex(peerAccount.GetShardId(), string(common.LeavingList), peerAccount.GetIndexInList()) - peerAccount.SetUnStakedEpoch(epoch) - err = s.peerAccountsDB.SaveAccount(peerAccount) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) updateDelegationContracts(mapOwnerKeys map[string][][]byte) error { - sortedDelegationsSCs := make([]string, 0, len(mapOwnerKeys)) - for address := range mapOwnerKeys { - shardId := s.shardCoordinator.ComputeId([]byte(address)) - if shardId != core.MetachainShardId { - continue - } - sortedDelegationsSCs = append(sortedDelegationsSCs, address) - } - - sort.Slice(sortedDelegationsSCs, func(i, j int) bool { - return sortedDelegationsSCs[i] < sortedDelegationsSCs[j] - }) - - for _, address := range sortedDelegationsSCs { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: mapOwnerKeys[address], - CallValue: big.NewInt(0), - }, - RecipientAddr: []byte(address), - Function: "unStakeAtEndOfEpoch", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) if err != nil { return err } - if vmOutput.ReturnCode != vmcommon.Ok { - log.Debug("unStakeAtEndOfEpoch", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) - return epochStart.ErrUnStakeExecuteError - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - } - - return nil -} - -func getValidatorInfoWithBLSKey(validatorInfos map[uint32][]*state.ValidatorInfo, blsKey []byte) *state.ValidatorInfo { - for _, validatorsInfoSlice := range validatorInfos { - for _, validatorInfo := range validatorsInfoSlice { - if bytes.Equal(validatorInfo.PublicKey, blsKey) { - return validatorInfo - } - } - } - return nil -} - -func (s *systemSCProcessor) fillStakingDataForNonEligible(validatorInfos map[uint32][]*state.ValidatorInfo) error { - for shId, validatorsInfoSlice := range validatorInfos { - newList := make([]*state.ValidatorInfo, 0, len(validatorsInfoSlice)) - deleteCalled := false - - for _, validatorInfo := range validatorsInfoSlice { - if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { - newList = append(newList, validatorInfo) - continue - } - - err := s.stakingDataProvider.FillValidatorInfo(validatorInfo.PublicKey) - if err != nil { - deleteCalled = true - - log.Error("fillStakingDataForNonEligible", "error", err) - if len(validatorInfo.List) > 0 { - 
return err - } - - err = s.peerAccountsDB.RemoveAccount(validatorInfo.PublicKey) - if err != nil { - log.Error("fillStakingDataForNonEligible removeAccount", "error", err) - } - - continue - } - - newList = append(newList, validatorInfo) - } - - if deleteCalled { - validatorInfos[shId] = newList - } - } - - return nil -} - -func (s *systemSCProcessor) prepareRewardsData( - validatorsInfo map[uint32][]*state.ValidatorInfo, -) error { - eligibleNodesKeys := s.getEligibleNodesKeyMapOfType(validatorsInfo) - err := s.prepareStakingDataForRewards(eligibleNodesKeys) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) prepareStakingDataForRewards(eligibleNodesKeys map[uint32][][]byte) error { - sw := core.NewStopWatch() - sw.Start("prepareStakingDataForRewards") - defer func() { - sw.Stop("prepareStakingDataForRewards") - log.Debug("systemSCProcessor.prepareStakingDataForRewards time measurements", sw.GetMeasurements()...) - }() - - return s.stakingDataProvider.PrepareStakingDataForRewards(eligibleNodesKeys) -} - -func (s *systemSCProcessor) getEligibleNodesKeyMapOfType( - validatorsInfo map[uint32][]*state.ValidatorInfo, -) map[uint32][][]byte { - eligibleNodesKeys := make(map[uint32][][]byte) - for shardID, validatorsInfoSlice := range validatorsInfo { - eligibleNodesKeys[shardID] = make([][]byte, 0, s.nodesConfigProvider.ConsensusGroupSize(shardID)) - for _, validatorInfo := range validatorsInfoSlice { - if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { - eligibleNodesKeys[shardID] = append(eligibleNodesKeys[shardID], validatorInfo.PublicKey) - } - } - } - - return eligibleNodesKeys -} - -func getRewardsMiniBlockForMeta(miniBlocks block.MiniBlockSlice) *block.MiniBlock { - for _, miniBlock := range miniBlocks { - if miniBlock.Type != block.RewardsBlock { - continue - } - if miniBlock.ReceiverShardID != core.MetachainShardId { - continue - } - return miniBlock - } - return nil -} - -// ProcessDelegationRewards will process the rewards which are directed towards the delegation system smart contracts -func (s *systemSCProcessor) ProcessDelegationRewards( - miniBlocks block.MiniBlockSlice, - txCache epochStart.TransactionCacher, -) error { - if txCache == nil { - return epochStart.ErrNilLocalTxCache - } - - rwdMb := getRewardsMiniBlockForMeta(miniBlocks) - if rwdMb == nil { - return nil - } - for _, txHash := range rwdMb.TxHashes { - rwdTx, err := txCache.GetTx(txHash) - if err != nil { - return err + validatorInfo := validatorsInfoMap.GetValidator(blsKey) + if validatorInfo == nil { + return fmt.Errorf( + "%w in systemSCProcessor.unStakeNodesWithNotEnoughFundsWithStakingV4 because validator might be in additional queue after staking v4", + epochStart.ErrNilValidatorInfo) } - err = s.executeRewardTx(rwdTx) + validatorLeaving := validatorInfo.ShallowClone() + validatorLeaving.SetListAndIndex(string(common.LeavingList), validatorLeaving.GetIndex(), true) + err = validatorsInfoMap.Replace(validatorInfo, validatorLeaving) if err != nil { return err } } - return nil -} - -func (s *systemSCProcessor) executeRewardTx(rwdTx data.TransactionHandler) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: nil, - CallValue: rwdTx.GetValue(), - }, - RecipientAddr: rwdTx.GetRcvAddr(), - Function: "updateRewards", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrSystemDelegationCall - } 
- - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -// updates the configuration of the system SC if the flags permit -func (s *systemSCProcessor) updateSystemSCConfigMinNodes() error { - minNumberOfNodesWithHysteresis := s.genesisNodesConfig.MinNumberOfNodesWithHysteresis() - err := s.setMinNumberOfNodes(minNumberOfNodesWithHysteresis) - - return err + return s.updateDelegationContracts(mapOwnersKeys) } -func (s *systemSCProcessor) resetLastUnJailed() error { +func (s *systemSCProcessor) updateToGovernanceV2() error { vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{}, + CallerAddr: vm.GovernanceSCAddress, CallValue: big.NewInt(0), + Arguments: [][]byte{}, }, - RecipientAddr: s.stakingSCAddress, - Function: "resetLastUnJailedFromQueue", + RecipientAddr: vm.GovernanceSCAddress, + Function: "initV2", } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when updating to governanceV2", errRun) } - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrResetLastUnJailedFromQueue - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err + return fmt.Errorf("got return code %s when updating to governanceV2", vmOutput.ReturnCode) } - return nil -} - -// updates the configuration of the system SC if the flags permit -func (s *systemSCProcessor) updateMaxNodes(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64) error { - sw := core.NewStopWatch() - sw.Start("total") - defer func() { - sw.Stop("total") - log.Debug("systemSCProcessor.updateMaxNodes", sw.GetMeasurements()...) 
- }() - - maxNumberOfNodes := s.maxNodes - sw.Start("setMaxNumberOfNodes") - prevMaxNumberOfNodes, err := s.setMaxNumberOfNodes(maxNumberOfNodes) - sw.Stop("setMaxNumberOfNodes") + err := s.processSCOutputAccounts(vmOutput) if err != nil { return err } - if maxNumberOfNodes < prevMaxNumberOfNodes { - return epochStart.ErrInvalidMaxNumberOfNodes - } - - sw.Start("stakeNodesFromQueue") - err = s.stakeNodesFromQueue(validatorInfos, maxNumberOfNodes-prevMaxNumberOfNodes, nonce) - sw.Stop("stakeNodesFromQueue") - if err != nil { - return err - } return nil } -func (s *systemSCProcessor) computeNumWaitingPerShard(validatorInfos map[uint32][]*state.ValidatorInfo) error { - for shardID, validatorInfoList := range validatorInfos { - totalInWaiting := uint32(0) - for _, validatorInfo := range validatorInfoList { - switch validatorInfo.List { - case string(common.WaitingList): - totalInWaiting++ - } - } - s.mapNumSwitchablePerShard[shardID] = totalInWaiting - s.mapNumSwitchedPerShard[shardID] = 0 - } - return nil -} - -func (s *systemSCProcessor) swapJailedWithWaiting(validatorInfos map[uint32][]*state.ValidatorInfo) error { - jailedValidators := s.getSortedJailedNodes(validatorInfos) - - log.Debug("number of jailed validators", "num", len(jailedValidators)) - - newValidators := make(map[string]struct{}) - for _, jailedValidator := range jailedValidators { - if _, ok := newValidators[string(jailedValidator.PublicKey)]; ok { - continue - } - if isValidator(jailedValidator) && s.mapNumSwitchablePerShard[jailedValidator.ShardId] <= s.mapNumSwitchedPerShard[jailedValidator.ShardId] { - log.Debug("cannot switch in this epoch anymore for this shard as switched num waiting", - "shardID", jailedValidator.ShardId, - "numSwitched", s.mapNumSwitchedPerShard[jailedValidator.ShardId]) - continue - } - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{jailedValidator.PublicKey}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "switchJailedWithWaiting", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - log.Debug("switchJailedWithWaiting called for", - "key", jailedValidator.PublicKey, - "returnMessage", vmOutput.ReturnMessage) - if vmOutput.ReturnCode != vmcommon.Ok { - continue - } - - newValidator, err := s.stakingToValidatorStatistics(validatorInfos, jailedValidator, vmOutput) - if err != nil { - return err - } - - if len(newValidator) != 0 { - newValidators[string(newValidator)] = struct{}{} - } - } - - return nil -} - -func (s *systemSCProcessor) stakingToValidatorStatistics( - validatorInfos map[uint32][]*state.ValidatorInfo, - jailedValidator *state.ValidatorInfo, - vmOutput *vmcommon.VMOutput, -) ([]byte, error) { - stakingSCOutput, ok := vmOutput.OutputAccounts[string(s.stakingSCAddress)] - if !ok { - return nil, epochStart.ErrStakingSCOutputAccountNotFound - } - - var activeStorageUpdate *vmcommon.StorageUpdate - for _, storageUpdate := range stakingSCOutput.StorageUpdates { - isNewValidatorKey := len(storageUpdate.Offset) == len(jailedValidator.PublicKey) && - !bytes.Equal(storageUpdate.Offset, jailedValidator.PublicKey) - if isNewValidatorKey { - activeStorageUpdate = storageUpdate - break - } - } - if activeStorageUpdate == nil { - log.Debug("no one in waiting suitable for switch") - if s.enableEpochsHandler.IsFlagEnabled(common.SaveJailedAlwaysFlag) { - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return nil, err 
- } - } - - return nil, nil - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return nil, err - } - - var stakingData systemSmartContracts.StakedDataV2_0 - err = s.marshalizer.Unmarshal(&stakingData, activeStorageUpdate.Data) - if err != nil { - return nil, err - } - - blsPubKey := activeStorageUpdate.Offset - log.Debug("staking validator key who switches with the jailed one", "blsKey", blsPubKey) - - account, isNew, err := state.GetPeerAccountAndReturnIfNew(s.peerAccountsDB, blsPubKey) - if err != nil { - return nil, err - } - - if !bytes.Equal(account.GetRewardAddress(), stakingData.RewardAddress) { - err = account.SetRewardAddress(stakingData.RewardAddress) - if err != nil { - return nil, err - } - } - - if !isNew { - // old jailed validator getting switched back after unJail with stake - must remove first from exported map - deleteNewValidatorIfExistsFromMap(validatorInfos, blsPubKey, account.GetShardId()) - } - - account.SetListAndIndex(jailedValidator.ShardId, string(common.NewList), uint32(stakingData.StakedNonce)) - account.SetTempRating(s.startRating) - account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) - - err = s.peerAccountsDB.SaveAccount(account) - if err != nil { - return nil, err - } - - jailedAccount, err := s.getPeerAccount(jailedValidator.PublicKey) - if err != nil { - return nil, err - } - - jailedAccount.SetListAndIndex(jailedValidator.ShardId, string(common.JailedList), jailedValidator.Index) - jailedAccount.ResetAtNewEpoch() - err = s.peerAccountsDB.SaveAccount(jailedAccount) - if err != nil { - return nil, err - } - - if isValidator(jailedValidator) { - s.mapNumSwitchedPerShard[jailedValidator.ShardId]++ - } - - newValidatorInfo := s.validatorInfoCreator.PeerAccountToValidatorInfo(account) - switchJailedWithNewValidatorInMap(validatorInfos, jailedValidator, newValidatorInfo) - - return blsPubKey, nil -} - -func isValidator(validator *state.ValidatorInfo) bool { - return validator.List == string(common.WaitingList) || validator.List == string(common.EligibleList) -} - -func deleteNewValidatorIfExistsFromMap( - validatorInfos map[uint32][]*state.ValidatorInfo, - blsPubKey []byte, - shardID uint32, -) { - for index, validatorInfo := range validatorInfos[shardID] { - if bytes.Equal(validatorInfo.PublicKey, blsPubKey) { - length := len(validatorInfos[shardID]) - validatorInfos[shardID][index] = validatorInfos[shardID][length-1] - validatorInfos[shardID][length-1] = nil - validatorInfos[shardID] = validatorInfos[shardID][:length-1] - break - } - } -} - -func switchJailedWithNewValidatorInMap( - validatorInfos map[uint32][]*state.ValidatorInfo, - jailedValidator *state.ValidatorInfo, - newValidator *state.ValidatorInfo, -) { - for index, validatorInfo := range validatorInfos[jailedValidator.ShardId] { - if bytes.Equal(validatorInfo.PublicKey, jailedValidator.PublicKey) { - validatorInfos[jailedValidator.ShardId][index] = newValidator - break - } - } -} - -func (s *systemSCProcessor) getUserAccount(address []byte) (state.UserAccountHandler, error) { - acnt, err := s.userAccountsDB.LoadAccount(address) - if err != nil { - return nil, err - } - - stAcc, ok := acnt.(state.UserAccountHandler) - if !ok { - return nil, process.ErrWrongTypeAssertion - } - - return stAcc, nil -} - -// save account changes in state from vmOutput - protected by VM - every output can be treated as is. 
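For orientation while reading the deletions below: the legacy helper processSCOutputAccounts is what persists the storage updates and balance deltas of every account returned by a system-SC call, and it is invoked after almost every RunSmartContractCall in this file. The following is a minimal illustrative sketch of that recurring call pattern, not part of the patch; it reuses names visible in this file (s.systemVM, s.endOfEpochCallerAddress, s.stakingSCAddress, processSCOutputAccounts), and "someSystemSCFunction" is a hypothetical placeholder for whichever endpoint is being called.

// Sketch of the legacy call pattern (assumed context: a *systemSCProcessor method).
vmInput := &vmcommon.ContractCallInput{
	VMInput: vmcommon.VMInput{
		CallerAddr: s.endOfEpochCallerAddress,
		Arguments:  [][]byte{},
		CallValue:  big.NewInt(0),
	},
	RecipientAddr: s.stakingSCAddress,
	Function:      "someSystemSCFunction", // hypothetical endpoint name
}

vmOutput, err := s.systemVM.RunSmartContractCall(vmInput)
if err != nil {
	return err
}
if vmOutput.ReturnCode != vmcommon.Ok {
	return epochStart.ErrInvalidSystemSCReturn
}

// Persist every storage update and balance delta produced by the call;
// the output comes from the trusted system VM, so it is applied as-is.
return s.processSCOutputAccounts(vmOutput)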
-func (s *systemSCProcessor) processSCOutputAccounts( - vmOutput *vmcommon.VMOutput, -) error { - - outputAccounts := process.SortVMOutputInsideData(vmOutput) - for _, outAcc := range outputAccounts { - acc, err := s.getUserAccount(outAcc.Address) - if err != nil { - return err - } - - storageUpdates := process.GetSortedStorageUpdates(outAcc) - for _, storeUpdate := range storageUpdates { - err = acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) - if err != nil { - return err - } - } - - if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { - err = acc.AddToBalance(outAcc.BalanceDelta) - if err != nil { - return err - } - } - - err = s.userAccountsDB.SaveAccount(acc) - if err != nil { - return err - } - } - - return nil -} - -func (s *systemSCProcessor) getSortedJailedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) []*state.ValidatorInfo { - newJailedValidators := make([]*state.ValidatorInfo, 0) - oldJailedValidators := make([]*state.ValidatorInfo, 0) - - minChance := s.chanceComputer.GetChance(0) - for _, listValidators := range validatorInfos { - for _, validatorInfo := range listValidators { - if validatorInfo.List == string(common.JailedList) { - oldJailedValidators = append(oldJailedValidators, validatorInfo) - } else if s.chanceComputer.GetChance(validatorInfo.TempRating) < minChance { - newJailedValidators = append(newJailedValidators, validatorInfo) - } - } - } - - sort.Sort(validatorList(oldJailedValidators)) - sort.Sort(validatorList(newJailedValidators)) - - return append(oldJailedValidators, newJailedValidators...) -} - -func (s *systemSCProcessor) getPeerAccount(key []byte) (state.PeerAccountHandler, error) { - account, err := s.peerAccountsDB.LoadAccount(key) - if err != nil { - return nil, err - } - - peerAcc, ok := account.(state.PeerAccountHandler) - if !ok { - return nil, epochStart.ErrWrongTypeAssertion - } - - return peerAcc, nil -} - -func (s *systemSCProcessor) setMinNumberOfNodes(minNumNodes uint32) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{big.NewInt(int64(minNumNodes)).Bytes()}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "updateConfigMinNodes", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - log.Debug("setMinNumberOfNodes called with", - "minNumNodes", minNumNodes, - "returnMessage", vmOutput.ReturnMessage) - - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrInvalidMinNumberOfNodes - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) setMaxNumberOfNodes(maxNumNodes uint32) (uint32, error) { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{big.NewInt(int64(maxNumNodes)).Bytes()}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "updateConfigMaxNodes", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return 0, err - } - - log.Debug("setMaxNumberOfNodes called with", - "maxNumNodes", maxNumNodes, - "returnMessage", vmOutput.ReturnMessage) - - if vmOutput.ReturnCode != vmcommon.Ok { - return 0, epochStart.ErrInvalidMaxNumberOfNodes - } - if len(vmOutput.ReturnData) != 1 { - return 0, epochStart.ErrInvalidSystemSCReturn - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return 0, err 
- } - - prevMaxNumNodes := big.NewInt(0).SetBytes(vmOutput.ReturnData[0]).Uint64() - return uint32(prevMaxNumNodes), nil -} - -func (s *systemSCProcessor) updateOwnersForBlsKeys() error { - sw := core.NewStopWatch() - sw.Start("systemSCProcessor") - defer func() { - sw.Stop("systemSCProcessor") - log.Debug("systemSCProcessor.updateOwnersForBlsKeys time measurements", sw.GetMeasurements()...) - }() - - sw.Start("getValidatorSystemAccount") - userValidatorAccount, err := s.getValidatorSystemAccount() - sw.Stop("getValidatorSystemAccount") - if err != nil { - return err - } - - sw.Start("getArgumentsForSetOwnerFunctionality") - arguments, err := s.getArgumentsForSetOwnerFunctionality(userValidatorAccount) - sw.Stop("getArgumentsForSetOwnerFunctionality") - if err != nil { - return err - } - - sw.Start("callSetOwnersOnAddresses") - err = s.callSetOwnersOnAddresses(arguments) - sw.Stop("callSetOwnersOnAddresses") - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) updateToGovernanceV2() error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.GovernanceSCAddress, - CallValue: big.NewInt(0), - Arguments: [][]byte{}, - }, - RecipientAddr: vm.GovernanceSCAddress, - Function: "initV2", - } - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when updating to governanceV2", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when updating to governanceV2", vmOutput.ReturnCode) - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) getValidatorSystemAccount() (state.UserAccountHandler, error) { - validatorAccount, err := s.userAccountsDB.LoadAccount(vm.ValidatorSCAddress) - if err != nil { - return nil, fmt.Errorf("%w when loading validator account", err) - } - - userValidatorAccount, ok := validatorAccount.(state.UserAccountHandler) - if !ok { - return nil, fmt.Errorf("%w when loading validator account", epochStart.ErrWrongTypeAssertion) - } - - if check.IfNil(userValidatorAccount.DataTrie()) { - return nil, epochStart.ErrNilDataTrie - } - - return userValidatorAccount, nil -} - -func (s *systemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAccount state.UserAccountHandler) ([][]byte, error) { - arguments := make([][]byte, 0) - - leavesChannels := &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChanWrapper(), - } - err := userValidatorAccount.GetAllLeaves(leavesChannels, context.Background()) - if err != nil { - return nil, err - } - for leaf := range leavesChannels.LeavesChan { - validatorData := &systemSmartContracts.ValidatorDataV2{} - - err = s.marshalizer.Unmarshal(validatorData, leaf.Value()) - if err != nil { - continue - } - for _, blsKey := range validatorData.BlsPubKeys { - arguments = append(arguments, blsKey) - arguments = append(arguments, leaf.Key()) - } - } - - err = leavesChannels.ErrChan.ReadFromChanNonBlocking() - if err != nil { - return nil, err - } - - return arguments, nil -} - -func (s *systemSCProcessor) callSetOwnersOnAddresses(arguments [][]byte) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.EndOfEpochAddress, - CallValue: big.NewInt(0), - Arguments: arguments, - }, - RecipientAddr: vm.StakingSCAddress, - Function: "setOwnersOnAddresses", - } - - vmOutput, errRun := 
s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when calling setOwnersOnAddresses function", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when calling setOwnersOnAddresses", vmOutput.ReturnCode) - } - - return s.processSCOutputAccounts(vmOutput) -} - -func (s *systemSCProcessor) initDelegationSystemSC() error { - codeMetaData := &vmcommon.CodeMetadata{ - Upgradeable: false, - Payable: false, - Readable: true, - } - - vmInput := &vmcommon.ContractCreateInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.DelegationManagerSCAddress, - Arguments: [][]byte{}, - CallValue: big.NewInt(0), - }, - ContractCode: vm.DelegationManagerSCAddress, - ContractCodeMetadata: codeMetaData.ToBytes(), - } - - vmOutput, err := s.systemVM.RunSmartContractCreate(vmInput) - if err != nil { - return err - } - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrCouldNotInitDelegationSystemSC - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - err = s.updateSystemSCContractsCode(vmInput.ContractCodeMetadata) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) updateSystemSCContractsCode(contractMetadata []byte) error { - contractsToUpdate := make([][]byte, 0) - contractsToUpdate = append(contractsToUpdate, vm.StakingSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.ValidatorSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.GovernanceSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.ESDTSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.DelegationManagerSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.FirstDelegationSCAddress) - - for _, address := range contractsToUpdate { - userAcc, err := s.getUserAccount(address) - if err != nil { - return err - } - - userAcc.SetOwnerAddress(address) - userAcc.SetCodeMetadata(contractMetadata) - userAcc.SetCode(address) - - err = s.userAccountsDB.SaveAccount(userAcc) - if err != nil { - return err - } - } - - return nil -} - -func (s *systemSCProcessor) cleanAdditionalQueue() error { - sw := core.NewStopWatch() - sw.Start("systemSCProcessor") - defer func() { - sw.Stop("systemSCProcessor") - log.Debug("systemSCProcessor.cleanAdditionalQueue time measurements", sw.GetMeasurements()...) 
- }() - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.EndOfEpochAddress, - CallValue: big.NewInt(0), - Arguments: [][]byte{}, - }, - RecipientAddr: vm.StakingSCAddress, - Function: "cleanAdditionalQueue", - } - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when cleaning additional queue", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s, return message %s when cleaning additional queue", vmOutput.ReturnCode, vmOutput.ReturnMessage) - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - // returnData format is list(address - all blsKeys which were unstaked for that) - addressLength := len(s.endOfEpochCallerAddress) - mapOwnersKeys := make(map[string][][]byte) - currentOwner := "" - for _, returnData := range vmOutput.ReturnData { - if len(returnData) == addressLength { - currentOwner = string(returnData) - continue - } - - if len(currentOwner) != addressLength { - continue - } - - mapOwnersKeys[currentOwner] = append(mapOwnersKeys[currentOwner], returnData) - } - - err = s.updateDelegationContracts(mapOwnersKeys) - if err != nil { - log.Error("update delegation contracts failed after cleaning additional queue", "error", err.Error()) - return err - } - - return nil -} - -func (s *systemSCProcessor) stakeNodesFromQueue( - validatorInfos map[uint32][]*state.ValidatorInfo, - nodesToStake uint32, - nonce uint64, -) error { - if nodesToStake == 0 { - return nil - } - - nodesToStakeAsBigInt := big.NewInt(0).SetUint64(uint64(nodesToStake)) - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.EndOfEpochAddress, - CallValue: big.NewInt(0), - Arguments: [][]byte{nodesToStakeAsBigInt.Bytes()}, - }, - RecipientAddr: vm.StakingSCAddress, - Function: "stakeNodesFromQueue", - } - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when staking nodes from waiting list", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when staking nodes from waiting list", vmOutput.ReturnCode) - } - if len(vmOutput.ReturnData)%2 != 0 { - return fmt.Errorf("%w return data must be divisible by 2 when staking nodes from waiting list", epochStart.ErrInvalidSystemSCReturn) - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - err = s.addNewlyStakedNodesToValidatorTrie(validatorInfos, vmOutput.ReturnData, nonce) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) addNewlyStakedNodesToValidatorTrie( - validatorInfos map[uint32][]*state.ValidatorInfo, - returnData [][]byte, - nonce uint64, -) error { - for i := 0; i < len(returnData); i += 2 { - blsKey := returnData[i] - rewardAddress := returnData[i+1] - - peerAcc, err := s.getPeerAccount(blsKey) - if err != nil { - return err - } - - err = peerAcc.SetRewardAddress(rewardAddress) - if err != nil { - return err - } - - peerAcc.SetListAndIndex(peerAcc.GetShardId(), string(common.NewList), uint32(nonce)) - peerAcc.SetTempRating(s.startRating) - peerAcc.SetUnStakedEpoch(common.DefaultUnstakedEpoch) - - err = s.peerAccountsDB.SaveAccount(peerAcc) - if err != nil { - return err - } - - validatorInfo := &state.ValidatorInfo{ - PublicKey: blsKey, - ShardId: peerAcc.GetShardId(), - List: string(common.NewList), - Index: uint32(nonce), - TempRating: s.startRating, - Rating: s.startRating, - RewardAddress: 
rewardAddress, - AccumulatedFees: big.NewInt(0), - } - validatorInfos[peerAcc.GetShardId()] = append(validatorInfos[peerAcc.GetShardId()], validatorInfo) - } - - return nil -} - -func (s *systemSCProcessor) initESDT() error { - currentConfigValues, err := s.extractConfigFromESDTContract() - if err != nil { - return err - } - - return s.changeESDTOwner(currentConfigValues) -} - -func (s *systemSCProcessor) extractConfigFromESDTContract() ([][]byte, error) { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{}, - CallValue: big.NewInt(0), - GasProvided: math.MaxInt64, - }, - Function: "getContractConfig", - RecipientAddr: vm.ESDTSCAddress, - } - - output, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return nil, err - } - if len(output.ReturnData) != 4 { - return nil, fmt.Errorf("%w getContractConfig should have returned 4 values", epochStart.ErrInvalidSystemSCReturn) - } - - return output.ReturnData, nil -} - -func (s *systemSCProcessor) changeESDTOwner(currentConfigValues [][]byte) error { - baseIssuingCost := currentConfigValues[1] - minTokenNameLength := currentConfigValues[2] - maxTokenNameLength := currentConfigValues[3] - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{s.esdtOwnerAddressBytes, baseIssuingCost, minTokenNameLength, maxTokenNameLength}, - CallValue: big.NewInt(0), - GasProvided: math.MaxInt64, - }, - Function: "configChange", - RecipientAddr: vm.ESDTSCAddress, - } - - output, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - if output.ReturnCode != vmcommon.Ok { - return fmt.Errorf("%w changeESDTOwner should have returned Ok", epochStart.ErrInvalidSystemSCReturn) - } - - return s.processSCOutputAccounts(output) -} - -// IsInterfaceNil returns true if underlying object is nil -func (s *systemSCProcessor) IsInterfaceNil() bool { - return s == nil +// IsInterfaceNil returns true if underlying object is nil +func (s *systemSCProcessor) IsInterfaceNil() bool { + return s == nil } // EpochConfirmed is called whenever a new epoch is confirmed func (s *systemSCProcessor) EpochConfirmed(epoch uint32, _ uint64) { - s.flagChangeMaxNodesEnabled.SetValue(false) - for _, maxNodesConfig := range s.maxNodesEnableConfig { - if epoch == maxNodesConfig.EpochEnable { - s.flagChangeMaxNodesEnabled.SetValue(true) - s.maxNodes = maxNodesConfig.MaxNumNodes - break - } - } - - log.Debug("systemSCProcessor: change of maximum number of nodes and/or shuffling percentage", - "enabled", s.flagChangeMaxNodesEnabled.IsSet(), - "epoch", epoch, - "maxNodes", s.maxNodes, - ) + s.legacyEpochConfirmed(epoch) } diff --git a/epochStart/metachain/systemSCs_test.go b/epochStart/metachain/systemSCs_test.go index d29dd0d8d09..1eadb05f231 100644 --- a/epochStart/metachain/systemSCs_test.go +++ b/epochStart/metachain/systemSCs_test.go @@ -8,7 +8,7 @@ import ( "math" "math/big" "os" - "strconv" + "strings" "testing" "github.com/multiversx/mx-chain-core-go/core" @@ -27,9 +27,9 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/mock" + "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/genesis/process/disabled" 
"github.com/multiversx/mx-chain-go/process" - economicsHandler "github.com/multiversx/mx-chain-go/process/economics" vmFactory "github.com/multiversx/mx-chain-go/process/factory" metaProcess "github.com/multiversx/mx-chain-go/process/factory/metachain" "github.com/multiversx/mx-chain-go/process/peer" @@ -48,7 +48,11 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageMock "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" @@ -87,17 +91,39 @@ func createPhysicalUnit(t *testing.T) (storage.Storer, string) { MaxOpenFiles: 10, } - dbConfigHandler := storageFactory.NewDBConfigHandler(dbConfig) - persisterFactory, err := storageFactory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := storageFactory.NewPersisterFactory(dbConfig) assert.Nil(t, err) cache, _ := storageunit.NewCache(cacheConfig) - persist, _ := storageunit.NewDB(persisterFactory, dir) + persist, _ := persisterFactory.CreateWithRetries(dir) unit, _ := storageunit.NewStorageUnit(cache, persist) return unit, dir } +func createMockArgsForSystemSCProcessor() ArgsNewEpochStartSystemSCProcessing { + return ArgsNewEpochStartSystemSCProcessing{ + SystemVM: &mock.VMExecutionHandlerStub{}, + UserAccountsDB: &stateMock.AccountsStub{}, + PeerAccountsDB: &stateMock.AccountsStub{}, + Marshalizer: &marshallerMock.MarshalizerStub{}, + StartRating: 0, + ValidatorInfoCreator: &testscommon.ValidatorStatisticsProcessorStub{}, + ChanceComputer: &mock.ChanceComputerStub{}, + ShardCoordinator: &testscommon.ShardsCoordinatorMock{}, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ESDTOwnerAddressBytes: vm.ESDTSCAddress, + GenesisNodesConfig: &genesisMocks.NodesSetupStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + NodesConfigProvider: &shardingMocks.NodesCoordinatorStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, + AuctionListSelector: &stakingcommon.AuctionListSelectorStub{}, + MaxNodesChangeConfigProvider: &testscommon.MaxNodesChangeConfigProviderStub{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + } +} + func TestNewSystemSCProcessor(t *testing.T) { t.Parallel() @@ -198,7 +224,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContract(t *testing.T) { jailedAcc, _ := args.PeerAccountsDB.LoadAccount([]byte("jailedPubKey0")) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) + validatorsInfo := state.NewShardValidatorsInfoMap() vInfo := &state.ValidatorInfo{ PublicKey: []byte("jailedPubKey0"), ShardId: 0, @@ -207,13 +233,13 @@ func TestSystemSCProcessor_ProcessSystemSmartContract(t *testing.T) { 
RewardAddress: []byte("address"), AccumulatedFees: big.NewInt(0), } - validatorInfos[0] = append(validatorInfos[0], vInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) - assert.Nil(t, err) + _ = validatorsInfo.Add(vInfo) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) - assert.Equal(t, len(validatorInfos[0]), 1) - newValidatorInfo := validatorInfos[0][0] - assert.Equal(t, newValidatorInfo.List, string(common.NewList)) + require.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 1) + newValidatorInfo := validatorsInfo.GetShardValidatorsInfoMap()[0][0] + require.Equal(t, newValidatorInfo.GetList(), string(common.NewList)) } func TestSystemSCProcessor_JailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T) { @@ -243,7 +269,7 @@ func testSystemSCProcessorJailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T, s numEligible := 9 numWaiting := 5 numJailed := 8 - stakingScAcc := loadSCAccount(args.UserAccountsDB, vm.StakingSCAddress) + stakingScAcc := stakingcommon.LoadUserAccount(args.UserAccountsDB, vm.StakingSCAddress) createEligibleNodes(numEligible, stakingScAcc, args.Marshalizer) _ = createWaitingNodes(numWaiting, stakingScAcc, args.UserAccountsDB, args.Marshalizer) jailed := createJailedNodes(numJailed, stakingScAcc, args.UserAccountsDB, args.PeerAccountsDB, args.Marshalizer) @@ -251,25 +277,25 @@ func testSystemSCProcessorJailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T, s _ = s.userAccountsDB.SaveAccount(stakingScAcc) _, _ = s.userAccountsDB.Commit() - addValidatorData(args.UserAccountsDB, []byte("ownerForAll"), [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, big.NewInt(900000), args.Marshalizer) + stakingcommon.AddValidatorData(args.UserAccountsDB, []byte("ownerForAll"), [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, big.NewInt(900000), args.Marshalizer) - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) - validatorsInfo[0] = append(validatorsInfo[0], jailed...) 
+ validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.SetValidatorsInShard(0, jailed) - err := s.ProcessSystemSmartContract(validatorsInfo, 0, 0) - assert.Nil(t, err) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) for i := 0; i < numWaiting; i++ { - assert.Equal(t, string(common.NewList), validatorsInfo[0][i].List) + require.Equal(t, string(common.NewList), validatorsInfo.GetShardValidatorsInfoMap()[0][i].GetList()) } for i := numWaiting; i < numJailed; i++ { - assert.Equal(t, string(common.JailedList), validatorsInfo[0][i].List) + require.Equal(t, string(common.JailedList), validatorsInfo.GetShardValidatorsInfoMap()[0][i].GetList()) } newJailedNodes := jailed[numWaiting:numJailed] checkNodesStatusInSystemSCDataTrie(t, newJailedNodes, args.UserAccountsDB, args.Marshalizer, saveJailedAlwaysEnableEpoch == 0) } -func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []*state.ValidatorInfo, accounts state.AccountsAdapter, marshalizer marshal.Marshalizer, jailed bool) { +func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []state.ValidatorInfoHandler, accounts state.AccountsAdapter, marshalizer marshal.Marshalizer, jailed bool) { account, err := accounts.LoadAccount(vm.StakingSCAddress) require.Nil(t, err) @@ -277,7 +303,7 @@ func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []*state.ValidatorIn systemScAccount, ok := account.(state.UserAccountHandler) require.True(t, ok) for _, nodeInfo := range nodes { - buff, _, err = systemScAccount.RetrieveValue(nodeInfo.PublicKey) + buff, _, err = systemScAccount.RetrieveValue(nodeInfo.GetPublicKey()) require.Nil(t, err) require.True(t, len(buff) > 0) @@ -315,7 +341,7 @@ func TestSystemSCProcessor_NobodyToSwapWithStakingV2(t *testing.T) { _ = s.initDelegationSystemSC() doStake(t, s.systemVM, s.userAccountsDB, owner1, big.NewInt(1000), blsKeys...) doUnStake(t, s.systemVM, s.userAccountsDB, owner1, blsKeys[:3]...) 
- validatorsInfo := make(map[uint32][]*state.ValidatorInfo) + validatorsInfo := state.NewShardValidatorsInfoMap() jailed := &state.ValidatorInfo{ PublicKey: blsKeys[0], ShardId: 0, @@ -324,13 +350,13 @@ func TestSystemSCProcessor_NobodyToSwapWithStakingV2(t *testing.T) { RewardAddress: []byte("owner1"), AccumulatedFees: big.NewInt(0), } - validatorsInfo[0] = append(validatorsInfo[0], jailed) + _ = validatorsInfo.Add(jailed) - err := s.ProcessSystemSmartContract(validatorsInfo, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - for _, vInfo := range validatorsInfo[0] { - assert.Equal(t, string(common.JailedList), vInfo.List) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.Equal(t, string(common.JailedList), vInfo.GetList()) } nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorsInfo) @@ -541,13 +567,6 @@ func doUnStake(t *testing.T, systemVm vmcommon.VMExecutionHandler, accountsDB st saveOutputAccounts(t, accountsDB, vmOutput) } -func loadSCAccount(accountsDB state.AccountsAdapter, address []byte) state.UserAccountHandler { - acc, _ := accountsDB.LoadAccount(address) - stakingSCAcc := acc.(state.UserAccountHandler) - - return stakingSCAcc -} - func createEligibleNodes(numNodes int, stakingSCAcc state.UserAccountHandler, marshalizer marshal.Marshalizer) { for i := 0; i < numNodes; i++ { stakedData := &systemSmartContracts.StakedDataV2_0{ @@ -563,8 +582,8 @@ func createEligibleNodes(numNodes int, stakingSCAcc state.UserAccountHandler, ma } } -func createJailedNodes(numNodes int, stakingSCAcc state.UserAccountHandler, userAccounts state.AccountsAdapter, peerAccounts state.AccountsAdapter, marshalizer marshal.Marshalizer) []*state.ValidatorInfo { - validatorInfos := make([]*state.ValidatorInfo, 0) +func createJailedNodes(numNodes int, stakingSCAcc state.UserAccountHandler, userAccounts state.AccountsAdapter, peerAccounts state.AccountsAdapter, marshalizer marshal.Marshalizer) []state.ValidatorInfoHandler { + validatorInfos := make([]state.ValidatorInfoHandler, 0) for i := 0; i < numNodes; i++ { stakedData := &systemSmartContracts.StakedDataV2_0{ @@ -603,8 +622,8 @@ func addValidatorDataWithUnStakedKey( nodePrice *big.Int, marshalizer marshal.Marshalizer, ) { - stakingAccount := loadSCAccount(accountsDB, vm.StakingSCAddress) - validatorAccount := loadSCAccount(accountsDB, vm.ValidatorSCAddress) + stakingAccount := stakingcommon.LoadUserAccount(accountsDB, vm.StakingSCAddress) + validatorAccount := stakingcommon.LoadUserAccount(accountsDB, vm.ValidatorSCAddress) validatorData := &systemSmartContracts.ValidatorDataV2{ RegisterNonce: 0, @@ -705,50 +724,6 @@ func createWaitingNodes(numNodes int, stakingSCAcc state.UserAccountHandler, use return validatorInfos } -func addValidatorData( - accountsDB state.AccountsAdapter, - ownerKey []byte, - registeredKeys [][]byte, - totalStake *big.Int, - marshalizer marshal.Marshalizer, -) { - validatorSC := loadSCAccount(accountsDB, vm.ValidatorSCAddress) - validatorData := &systemSmartContracts.ValidatorDataV2{ - RegisterNonce: 0, - Epoch: 0, - RewardAddress: ownerKey, - TotalStakeValue: totalStake, - LockedStake: big.NewInt(0), - TotalUnstaked: big.NewInt(0), - BlsPubKeys: registeredKeys, - NumRegistered: uint32(len(registeredKeys)), - } - - marshaledData, _ := marshalizer.Marshal(validatorData) - _ = validatorSC.SaveKeyValue(ownerKey, marshaledData) - - _ = accountsDB.SaveAccount(validatorSC) -} - -func addStakedData( - accountsDB 
state.AccountsAdapter, - stakedKey []byte, - ownerKey []byte, - marshalizer marshal.Marshalizer, -) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - stakedData := &systemSmartContracts.StakedDataV2_0{ - Staked: true, - RewardAddress: ownerKey, - OwnerAddress: ownerKey, - StakeValue: big.NewInt(0), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(stakedKey, marshaledData) - - _ = accountsDB.SaveAccount(stakingSCAcc) -} - func prepareStakingContractWithData( accountsDB state.AccountsAdapter, stakedKey []byte, @@ -757,139 +732,14 @@ func prepareStakingContractWithData( rewardAddress []byte, ownerAddress []byte, ) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - - stakedData := &systemSmartContracts.StakedDataV2_0{ - Staked: true, - RewardAddress: rewardAddress, - OwnerAddress: ownerAddress, - StakeValue: big.NewInt(100), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(stakedKey, marshaledData) - _ = accountsDB.SaveAccount(stakingSCAcc) + stakingcommon.AddStakingData(accountsDB, ownerAddress, rewardAddress, [][]byte{stakedKey}, marshalizer) + stakingcommon.AddKeysToWaitingList(accountsDB, [][]byte{waitingKey}, marshalizer, rewardAddress, ownerAddress) + stakingcommon.AddValidatorData(accountsDB, rewardAddress, [][]byte{stakedKey, waitingKey}, big.NewInt(10000000000), marshalizer) - saveOneKeyToWaitingList(accountsDB, waitingKey, marshalizer, rewardAddress, ownerAddress) - - validatorSC := loadSCAccount(accountsDB, vm.ValidatorSCAddress) - validatorData := &systemSmartContracts.ValidatorDataV2{ - RegisterNonce: 0, - Epoch: 0, - RewardAddress: rewardAddress, - TotalStakeValue: big.NewInt(10000000000), - LockedStake: big.NewInt(10000000000), - TotalUnstaked: big.NewInt(0), - NumRegistered: 2, - BlsPubKeys: [][]byte{stakedKey, waitingKey}, - } - - marshaledData, _ = marshalizer.Marshal(validatorData) - _ = validatorSC.SaveKeyValue(rewardAddress, marshaledData) - - _ = accountsDB.SaveAccount(validatorSC) _, err := accountsDB.Commit() log.LogIfError(err) } -func saveOneKeyToWaitingList( - accountsDB state.AccountsAdapter, - waitingKey []byte, - marshalizer marshal.Marshalizer, - rewardAddress []byte, - ownerAddress []byte, -) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - stakedData := &systemSmartContracts.StakedDataV2_0{ - Waiting: true, - RewardAddress: rewardAddress, - OwnerAddress: ownerAddress, - StakeValue: big.NewInt(100), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(waitingKey, marshaledData) - - waitingKeyInList := []byte("w_" + string(waitingKey)) - waitingListHead := &systemSmartContracts.WaitingList{ - FirstKey: waitingKeyInList, - LastKey: waitingKeyInList, - Length: 1, - } - marshaledData, _ = marshalizer.Marshal(waitingListHead) - _ = stakingSCAcc.SaveKeyValue([]byte("waitingList"), marshaledData) - - waitingListElement := &systemSmartContracts.ElementInList{ - BLSPublicKey: waitingKey, - PreviousKey: waitingKeyInList, - NextKey: make([]byte, 0), - } - marshaledData, _ = marshalizer.Marshal(waitingListElement) - _ = stakingSCAcc.SaveKeyValue(waitingKeyInList, marshaledData) - - _ = accountsDB.SaveAccount(stakingSCAcc) -} - -func addKeysToWaitingList( - accountsDB state.AccountsAdapter, - waitingKeys [][]byte, - marshalizer marshal.Marshalizer, - rewardAddress []byte, - ownerAddress []byte, -) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - - for _, waitingKey := range 
waitingKeys { - stakedData := &systemSmartContracts.StakedDataV2_0{ - Waiting: true, - RewardAddress: rewardAddress, - OwnerAddress: ownerAddress, - StakeValue: big.NewInt(100), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(waitingKey, marshaledData) - } - - marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) - waitingListHead := &systemSmartContracts.WaitingList{} - _ = marshalizer.Unmarshal(waitingListHead, marshaledData) - waitingListHead.Length += uint32(len(waitingKeys)) - lastKeyInList := []byte("w_" + string(waitingKeys[len(waitingKeys)-1])) - waitingListHead.LastKey = lastKeyInList - - marshaledData, _ = marshalizer.Marshal(waitingListHead) - _ = stakingSCAcc.SaveKeyValue([]byte("waitingList"), marshaledData) - - numWaitingKeys := len(waitingKeys) - previousKey := waitingListHead.FirstKey - for i, waitingKey := range waitingKeys { - - waitingKeyInList := []byte("w_" + string(waitingKey)) - waitingListElement := &systemSmartContracts.ElementInList{ - BLSPublicKey: waitingKey, - PreviousKey: previousKey, - NextKey: make([]byte, 0), - } - - if i < numWaitingKeys-1 { - nextKey := []byte("w_" + string(waitingKeys[i+1])) - waitingListElement.NextKey = nextKey - } - - marshaledData, _ = marshalizer.Marshal(waitingListElement) - _ = stakingSCAcc.SaveKeyValue(waitingKeyInList, marshaledData) - - previousKey = waitingKeyInList - } - - marshaledData, _, _ = stakingSCAcc.RetrieveValue(waitingListHead.FirstKey) - waitingListElement := &systemSmartContracts.ElementInList{} - _ = marshalizer.Unmarshal(waitingListElement, marshaledData) - waitingListElement.NextKey = []byte("w_" + string(waitingKeys[0])) - marshaledData, _ = marshalizer.Marshal(waitingListElement) - _ = stakingSCAcc.SaveKeyValue(waitingListHead.FirstKey, marshaledData) - - _ = accountsDB.SaveAccount(stakingSCAcc) -} - func createAccountsDB( hasher hashing.Hasher, marshaller marshal.Marshalizer, @@ -935,6 +785,9 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp accCreator, _ := factory.NewAccountCreator(argsAccCreator) peerAccCreator := factory.NewPeerAccountCreator() en := forking.NewGenericEpochNotifier() + enableEpochsConfig.StakeLimitsEnableEpoch = 10 + enableEpochsConfig.StakingV4Step1EnableEpoch = 444 + enableEpochsConfig.StakingV4Step2EnableEpoch = 445 epochsConfig := &config.EpochConfig{ EnableEpochs: enableEpochsConfig, } @@ -952,7 +805,7 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp PeerAdapter: peerAccountsDB, Rater: &mock.RaterStub{}, RewardsHandler: &mock.RewardsHandlerStub{}, - NodesSetup: &mock.NodesSetupStub{}, + NodesSetup: &genesisMocks.NodesSetupStub{}, MaxComputableRounds: 1, MaxConsecutiveRoundsOfRatingDecrease: 2000, EnableEpochsHandler: enableEpochsHandler, @@ -960,15 +813,14 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp vCreator, _ := peer.NewValidatorStatisticsProcessor(argsValidatorsProcessor) blockChain, _ := blockchain.NewMetaChain(&statusHandlerMock.AppStatusHandlerStub{}) + gasSchedule := wasmConfig.MakeGasMapForTests() + gasScheduleNotifier := testscommon.NewGasScheduleNotifierMock(gasSchedule) testDataPool := dataRetrieverMock.NewPoolsHolderMock() - gasSchedule := wasmConfig.MakeGasMapForTests() defaults.FillGasMapInternal(gasSchedule, 1) signVerifer, _ := disabled.NewMessageSignVerifier(&cryptoMocks.KeyGenStub{}) - gasScheduleNotifier := testscommon.NewGasScheduleNotifierMock(gasSchedule) - - nodesSetup := 
&mock.NodesSetupStub{} + nodesSetup := &genesisMocks.NodesSetupStub{} argsHook := hooks.ArgBlockChainHook{ Accounts: userAccountsDB, @@ -978,10 +830,10 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp ShardCoordinator: &mock.ShardCoordinatorStub{}, Marshalizer: marshalizer, Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, - GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, + BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), DataPool: testDataPool, + GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, CompiledSCPool: testDataPool.SmartContracts(), EpochNotifier: en, EnableEpochsHandler: enableEpochsHandler, @@ -991,11 +843,13 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } + defaults.FillGasMapInternal(gasSchedule, 1) + blockChainHookImpl, _ := hooks.NewBlockChainHookImpl(argsHook) argsNewVMContainerFactory := metaProcess.ArgsNewVMContainerFactory{ BlockChainHook: blockChainHookImpl, PubkeyConv: argsHook.PubkeyConv, - Economics: createEconomicsData(), + Economics: stakingcommon.CreateEconomicsData(), MessageSignVerifier: signVerifer, GasSchedule: gasScheduleNotifier, NodesConfigProvider: nodesSetup, @@ -1032,6 +886,8 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MaxNumberOfNodesForStake: 5, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -1042,21 +898,57 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: peerAccountsDB, UserAccountsDB: userAccountsDB, ChanceComputer: &mock.ChanceComputerStub{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: enableEpochsHandler, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + ArgBlockChainHook: argsHook, } metaVmFactory, _ := metaProcess.NewVMContainerFactory(argsNewVMContainerFactory) vmContainer, _ := metaVmFactory.Create() systemVM, _ := vmContainer.Get(vmFactory.SystemVirtualMachine) - stakingSCprovider, _ := NewStakingDataProvider(systemVM, "1000") + argsStakingDataProvider := StakingDataProviderArgs{ + EnableEpochsHandler: enableEpochsHandler, + SystemVM: systemVM, + MinNodePrice: "1000", + } + stakingSCProvider, _ := NewStakingDataProvider(argsStakingDataProvider) shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(en, nil) + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + Denomination: 0, + }) + argsAuctionListSelector := AuctionListSelectorArgs{ + ShardCoordinator: 
shardCoordinator, + StakingDataProvider: stakingSCProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + als, _ := NewAuctionListSelector(argsAuctionListSelector) + args := ArgsNewEpochStartSystemSCProcessing{ SystemVM: systemVM, UserAccountsDB: userAccountsDB, @@ -1069,7 +961,8 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp ChanceComputer: &mock.ChanceComputerStub{}, EpochNotifier: en, GenesisNodesConfig: nodesSetup, - StakingDataProvider: stakingSCprovider, + StakingDataProvider: stakingSCProvider, + AuctionListSelector: als, NodesConfigProvider: &shardingMocks.NodesCoordinatorStub{ ConsensusGroupSizeCalled: func(shardID uint32) int { if shardID == core.MetachainShardId { @@ -1078,87 +971,189 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp return 63 }, }, - ShardCoordinator: shardCoordinator, - ESDTOwnerAddressBytes: bytes.Repeat([]byte{1}, 32), - EnableEpochsHandler: enableEpochsHandler, + ShardCoordinator: shardCoordinator, + ESDTOwnerAddressBytes: bytes.Repeat([]byte{1}, 32), + MaxNodesChangeConfigProvider: nodesConfigProvider, + EnableEpochsHandler: enableEpochsHandler, } return args, metaVmFactory.SystemSmartContractContainer() } -func createEconomicsData() process.EconomicsDataHandler { - maxGasLimitPerBlock := strconv.FormatUint(1500000000, 10) - minGasPrice := strconv.FormatUint(10, 10) - minGasLimit := strconv.FormatUint(10, 10) - - argsNewEconomicsData := economicsHandler.ArgsNewEconomicsData{ - Economics: &config.EconomicsConfig{ - GlobalSettings: config.GlobalSettings{ - GenesisTotalSupply: "2000000000000000000000", - MinimumInflation: 0, - YearSettings: []*config.YearSetting{ - { - Year: 0, - MaximumInflation: 0.01, - }, - }, +func TestSystemSCProcessor_ProcessSystemSmartContractInitDelegationMgr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + t.Run("flag not active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.GovernanceFlagInSpecificEpochOnly || + flag == common.StakingV4Step1Flag || + flag == common.StakingV4Step2Flag || + flag == common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly || + flag == common.StakingV2OwnerFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlagInSpecificEpochOnly || + flag == common.DelegationSmartContractFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlag || + flag == common.SwitchJailWaitingFlag || + flag == common.StakingV2Flag || + flag == common.ESDTFlagInSpecificEpochOnly { + + return false + } + + return true }, - RewardsSettings: config.RewardsSettings{ - RewardsConfigByEpoch: []config.EpochRewardSettings{ - { - LeaderPercentage: 0.1, - DeveloperPercentage: 0.1, - ProtocolSustainabilityPercentage: 0.1, - ProtocolSustainabilityAddress: "protocol", - TopUpGradientPoint: "300000000000000000000", - TopUpFactor: 0.25, - }, - }, + } + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCreateCalled: func(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + assert.Fail(t, "should have not called") + + return nil, fmt.Errorf("should have not called") }, - FeeSettings: config.FeeSettings{ - GasLimitSettings: []config.GasLimitSetting{ - { - MaxGasLimitPerBlock: maxGasLimitPerBlock, - MaxGasLimitPerMiniBlock: 
maxGasLimitPerBlock, - MaxGasLimitPerMetaBlock: maxGasLimitPerBlock, - MaxGasLimitPerMetaMiniBlock: maxGasLimitPerBlock, - MaxGasLimitPerTx: maxGasLimitPerBlock, - MinGasLimit: minGasLimit, - ExtraGasLimitGuardedTx: "50000", - }, - }, - MinGasPrice: minGasPrice, - GasPerDataByte: "1", - GasPriceModifier: 1.0, - MaxGasPriceSetGuardian: "100000", + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + }) + t.Run("flag active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.DelegationSmartContractFlagInSpecificEpochOnly }, - }, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - } - economicsData, _ := economicsHandler.NewEconomicsData(argsNewEconomicsData) - return economicsData + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCreateCalled: func(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return &vmcommon.VMOutput{}, nil + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + require.True(t, runSmartContractCreateCalled) + }) + t.Run("flag active but contract create call errors, should error", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.DelegationSmartContractFlagInSpecificEpochOnly + }, + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCreateCalled: func(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return nil, expectedErr + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.ErrorIs(t, err, expectedErr) + require.True(t, runSmartContractCreateCalled) + }) } -func TestSystemSCProcessor_ProcessSystemSmartContractInitDelegationMgr(t *testing.T) { +func TestSystemSCProcessor_ProcessSystemSmartContractInitGovernance(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ - StakingV2EnableEpoch: 1000, - }, testscommon.CreateMemUnit()) - s, _ := NewSystemSCProcessor(args) + expectedErr := errors.New("expected error") + t.Run("flag not active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.GovernanceFlagInSpecificEpochOnly || + flag == common.StakingV4Step1Flag || + flag == common.StakingV4Step2Flag || + flag == 
common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly || + flag == common.StakingV2OwnerFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlagInSpecificEpochOnly || + flag == common.DelegationSmartContractFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlag || + flag == common.SwitchJailWaitingFlag || + flag == common.StakingV2Flag || + flag == common.ESDTFlagInSpecificEpochOnly { + + return false + } - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) - assert.Nil(t, err) + return true + }, + } + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + assert.Fail(t, "should have not called") - acc, err := s.userAccountsDB.GetExistingAccount(vm.DelegationManagerSCAddress) - assert.Nil(t, err) + return nil, fmt.Errorf("should have not called") + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + }) + t.Run("flag active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.GovernanceFlagInSpecificEpochOnly + }, + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return &vmcommon.VMOutput{}, nil + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + require.True(t, runSmartContractCreateCalled) + }) + t.Run("flag active but contract call errors, should error", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.GovernanceFlagInSpecificEpochOnly + }, + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true - userAcc, _ := acc.(state.UserAccountHandler) - assert.Equal(t, userAcc.GetOwnerAddress(), vm.DelegationManagerSCAddress) - assert.NotNil(t, userAcc.GetCodeMetadata()) + return nil, expectedErr + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.ErrorIs(t, err, expectedErr) + require.Contains(t, err.Error(), "governanceV2") + require.True(t, runSmartContractCreateCalled) + }) } func TestSystemSCProcessor_ProcessDelegationRewardsNothingToExecute(t *testing.T) { @@ -1294,7 +1289,8 @@ func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueue(t * t.Parallel() args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) - args.MaxNodesEnableConfig = []config.MaxNodesChangeConfig{{EpochEnable: 0, 
MaxNumNodes: 10}} + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(args.EpochNotifier, []config.MaxNodesChangeConfig{{EpochEnable: 0, MaxNumNodes: 10}}) + args.MaxNodesChangeConfigProvider = nodesConfigProvider s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1306,8 +1302,8 @@ func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueue(t * []byte("rewardAddress"), ) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + validatorsInfo := state.NewShardValidatorsInfoMap() + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) peerAcc, err := s.getPeerAccount([]byte("waitingPubKey")) @@ -1341,10 +1337,12 @@ func getTotalNumberOfRegisteredNodes(t *testing.T, s *systemSCProcessor) int { func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueueOwnerNotSet(t *testing.T) { t.Parallel() + maxNodesChangeConfig := []config.MaxNodesChangeConfig{{EpochEnable: 10, MaxNumNodes: 10}} args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ - StakingV2EnableEpoch: 10, + MaxNodesChangeEnableEpoch: maxNodesChangeConfig, + StakingV2EnableEpoch: 10, }, testscommon.CreateMemUnit()) - args.MaxNodesEnableConfig = []config.MaxNodesChangeConfig{{EpochEnable: 10, MaxNumNodes: 10}} + args.MaxNodesChangeConfigProvider, _ = notifier.NewNodesConfigProvider(args.EpochNotifier, maxNodesChangeConfig) s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1359,8 +1357,8 @@ func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueueOwne args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 10, }) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 10) + validatorsInfo := state.NewShardValidatorsInfoMap() + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{Epoch: 10}) assert.Nil(t, err) peerAcc, err := s.getPeerAccount([]byte("waitingPubKey")) @@ -1387,7 +1385,7 @@ func TestSystemSCProcessor_ESDTInitShouldWork(t *testing.T) { require.Equal(t, 4, len(initialContractConfig)) require.Equal(t, []byte("aaaaaa"), initialContractConfig[0]) - err = s.ProcessSystemSmartContract(nil, 1, 1) + err = s.ProcessSystemSmartContract(state.NewShardValidatorsInfoMap(), &block.Header{Nonce: 1, Epoch: 1}) require.Nil(t, err) @@ -1415,47 +1413,48 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeOneNodeStakeOthers(t []byte("rewardAddress"), []byte("rewardAddress"), ) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, + []byte("ownerKey"), + []byte("ownerKey"), + [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, + big.NewInt(2000), + args.Marshalizer, + ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), []byte("ownerKey"), args.Marshalizer) - addValidatorData(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, big.NewInt(2000), args.Marshalizer) - _, _ = args.UserAccountsDB.Commit() - - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = 
validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) peerAcc, err := s.getPeerAccount([]byte("waitingPubKey")) @@ -1466,10 +1465,10 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeOneNodeStakeOthers(t peerAcc, _ = s.getPeerAccount([]byte("stakedPubKey1")) assert.Equal(t, peerAcc.GetList(), string(common.LeavingList)) - assert.Equal(t, string(common.LeavingList), validatorInfos[0][1].List) + assert.Equal(t, string(common.LeavingList), validatorsInfo.GetShardValidatorsInfoMap()[0][1].GetList()) - assert.Equal(t, 5, len(validatorInfos[0])) - assert.Equal(t, string(common.NewList), validatorInfos[0][4].List) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 5) + assert.Equal(t, string(common.NewList), validatorsInfo.GetShardValidatorsInfoMap()[0][4].GetList()) } func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWork(t *testing.T) { @@ -1487,18 +1486,18 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWor []byte("rewardAddress"), ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), []byte("ownerKey"), args.Marshalizer) + stakingcommon.AddStakingData(args.UserAccountsDB, []byte("ownerKey"), []byte("ownerKey"), [][]byte{[]byte("stakedPubKey1")}, args.Marshalizer) addValidatorDataWithUnStakedKey(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey1")}, big.NewInt(1000), args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), @@ -1508,7 
+1507,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWor args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) } @@ -1518,7 +1517,7 @@ func addDelegationData( stakedKeys [][]byte, marshalizer marshal.Marshalizer, ) { - delegatorSC := loadSCAccount(accountsDB, delegation) + delegatorSC := stakingcommon.LoadUserAccount(accountsDB, delegation) dStatus := &systemSmartContracts.DelegationContractStatus{ StakedKeys: make([]*systemSmartContracts.NodesData, 0), NotStakedKeys: make([]*systemSmartContracts.NodesData, 0), @@ -1546,68 +1545,71 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromDelegationContra contract, _ := scContainer.Get(vm.FirstDelegationSCAddress) _ = scContainer.Add(delegationAddr, contract) - prepareStakingContractWithData( + stakingcommon.AddStakingData( args.UserAccountsDB, - []byte("stakedPubKey0"), - []byte("waitingPubKey"), + delegationAddr, + delegationAddr, + [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, args.Marshalizer, + ) + allKeys := [][]byte{[]byte("stakedPubKey0"), []byte("waitingPubKey"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")} + stakingcommon.RegisterValidatorKeys( + args.UserAccountsDB, delegationAddr, delegationAddr, + allKeys, + big.NewInt(3000), + args.Marshalizer, ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), delegationAddr, args.Marshalizer) - allKeys := [][]byte{[]byte("stakedPubKey0"), []byte("waitingPubKey"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")} - addValidatorData(args.UserAccountsDB, delegationAddr, allKeys, big.NewInt(3000), args.Marshalizer) addDelegationData(args.UserAccountsDB, delegationAddr, allKeys, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.WaitingList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.WaitingList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := 
args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - for _, vInfo := range validatorInfos[0] { - assert.NotEqual(t, string(common.NewList), vInfo.List) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.NotEqual(t, string(common.NewList), vInfo.GetList()) } peerAcc, _ := s.getPeerAccount([]byte("stakedPubKey2")) assert.Equal(t, peerAcc.GetList(), string(common.LeavingList)) - assert.Equal(t, 4, len(validatorInfos[0])) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 4) - delegationSC := loadSCAccount(args.UserAccountsDB, delegationAddr) + delegationSC := stakingcommon.LoadUserAccount(args.UserAccountsDB, delegationAddr) marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus")) assert.Nil(t, err) dStatus := &systemSmartContracts.DelegationContractStatus{ @@ -1634,67 +1636,55 @@ func TestSystemSCProcessor_ProcessSystemSmartContractShouldUnStakeFromAdditional contract, _ := scContainer.Get(vm.FirstDelegationSCAddress) _ = scContainer.Add(delegationAddr, contract) - prepareStakingContractWithData( - args.UserAccountsDB, - []byte("stakedPubKey0"), - []byte("waitingPubKey"), - args.Marshalizer, - delegationAddr, - delegationAddr, - ) - - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), delegationAddr, args.Marshalizer) + listOfKeysInWaiting := [][]byte{[]byte("waitingPubKey"), []byte("waitingPubKe1"), []byte("waitingPubKe2"), []byte("waitingPubKe3"), []byte("waitingPubKe4")} + allStakedKeys := append(listOfKeysInWaiting, []byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")) - listOfKeysInWaiting := [][]byte{[]byte("waitingPubKe1"), []byte("waitingPubKe2"), []byte("waitingPubKe3"), []byte("waitingPubKe4")} - allStakedKeys := append(listOfKeysInWaiting, []byte("waitingPubKey"), []byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")) - addKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr, delegationAddr) - addValidatorData(args.UserAccountsDB, delegationAddr, allStakedKeys, big.NewInt(4000), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, delegationAddr, delegationAddr, allStakedKeys, big.NewInt(4000), args.Marshalizer) + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr, delegationAddr) addDelegationData(args.UserAccountsDB, delegationAddr, allStakedKeys, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: 
[]byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - for _, vInfo := range validatorInfos[0] { - assert.Equal(t, string(common.EligibleList), vInfo.List) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.Equal(t, string(common.EligibleList), vInfo.GetList()) } - delegationSC := loadSCAccount(args.UserAccountsDB, delegationAddr) + delegationSC := stakingcommon.LoadUserAccount(args.UserAccountsDB, delegationAddr) marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus")) assert.Nil(t, err) dStatus := &systemSmartContracts.DelegationContractStatus{ @@ -1729,10 +1719,14 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( delegationAddr, ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), delegationAddr, args.Marshalizer) - addValidatorData(args.UserAccountsDB, delegationAddr, [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(10000), args.Marshalizer) + stakingcommon.AddStakingData(args.UserAccountsDB, + delegationAddr, + delegationAddr, + [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, + args.Marshalizer, + ) + + stakingcommon.AddValidatorData(args.UserAccountsDB, delegationAddr, [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(10000), args.Marshalizer) addDelegationData(args.UserAccountsDB, delegationAddr, [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() @@ -1741,47 +1735,47 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( _ = scContainer.Add(delegationAddr2, contract) listOfKeysInWaiting := [][]byte{[]byte("waitingPubKe1"), []byte("waitingPubKe2"), []byte("waitingPubKe3"), []byte("waitingPubKe4")} - addKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr2, delegationAddr2) - addValidatorData(args.UserAccountsDB, delegationAddr2, listOfKeysInWaiting, big.NewInt(2000), args.Marshalizer) + 
stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr2, delegationAddr2) + stakingcommon.AddValidatorData(args.UserAccountsDB, delegationAddr2, listOfKeysInWaiting, big.NewInt(2000), args.Marshalizer) addDelegationData(args.UserAccountsDB, delegationAddr2, listOfKeysInWaiting, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - peerAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + peerAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(peerAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - delegationSC := loadSCAccount(args.UserAccountsDB, delegationAddr2) + delegationSC := stakingcommon.LoadUserAccount(args.UserAccountsDB, delegationAddr2) marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus")) assert.Nil(t, err) dStatus := &systemSmartContracts.DelegationContractStatus{ @@ -1797,7 +1791,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( assert.Equal(t, []byte("waitingPubKe4"), dStatus.UnStakedKeys[0].BLSKey) assert.Equal(t, []byte("waitingPubKe3"), dStatus.UnStakedKeys[1].BLSKey) - stakingSCAcc := loadSCAccount(args.UserAccountsDB, vm.StakingSCAddress) + stakingSCAcc := stakingcommon.LoadUserAccount(args.UserAccountsDB, vm.StakingSCAddress) marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) waitingListHead := &systemSmartContracts.WaitingList{} _ = args.Marshalizer.Unmarshal(waitingListHead, marshaledData) @@ -1819,42 +1813,42 @@ func TestSystemSCProcessor_ProcessSystemSmartContractWrongValidatorInfoShouldBeC []byte("oneAddress1"), ) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = 
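Several of the unStake checks in these tests share one read-back pattern: load the delegation contract account, read the value stored under the "delegationStatus" key and unmarshal it into a DelegationContractStatus. A minimal sketch of that pattern, built only from helpers that already appear in this patch (the readDelegationStatus wrapper and its parameter types are illustrative, not part of the change):

func readDelegationStatus(
    accountsDB state.AccountsAdapter,
    marshalizer marshal.Marshalizer,
    delegationAddr []byte,
) (*systemSmartContracts.DelegationContractStatus, error) {
    // load the delegation SC account, as stakingcommon.LoadUserAccount is used throughout these tests
    delegationSC := stakingcommon.LoadUserAccount(accountsDB, delegationAddr)

    // the delegation contract keeps its node lists marshalled under the "delegationStatus" key
    marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus"))
    if err != nil {
        return nil, err
    }

    dStatus := &systemSmartContracts.DelegationContractStatus{}
    err = marshalizer.Unmarshal(dStatus, marshalledData)
    if err != nil {
        return nil, err
    }

    // StakedKeys, NotStakedKeys and UnStakedKeys can then be inspected, as the assertions in these tests do
    return dStatus, nil
}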
validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("oneAddress1"), List: string(common.EligibleList), RewardAddress: []byte("oneAddress1"), AccumulatedFees: big.NewInt(0), }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - assert.Equal(t, len(validatorInfos[0]), 1) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 1) } func TestSystemSCProcessor_TogglePauseUnPause(t *testing.T) { @@ -1866,14 +1860,14 @@ func TestSystemSCProcessor_TogglePauseUnPause(t *testing.T) { err := s.ToggleUnStakeUnBond(true) assert.Nil(t, err) - validatorSC := loadSCAccount(s.userAccountsDB, vm.ValidatorSCAddress) + validatorSC := stakingcommon.LoadUserAccount(s.userAccountsDB, vm.ValidatorSCAddress) value, _, _ := validatorSC.RetrieveValue([]byte("unStakeUnBondPause")) assert.True(t, value[0] == 1) err = s.ToggleUnStakeUnBond(false) assert.Nil(t, err) - validatorSC = loadSCAccount(s.userAccountsDB, vm.ValidatorSCAddress) + validatorSC = stakingcommon.LoadUserAccount(s.userAccountsDB, vm.ValidatorSCAddress) value, _, _ = validatorSC.RetrieveValue([]byte("unStakeUnBondPause")) assert.True(t, value[0] == 0) } @@ -1905,58 +1899,60 @@ func TestSystemSCProcessor_ProcessSystemSmartContractJailAndUnStake(t *testing.T args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey0"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), []byte("ownerKey"), args.Marshalizer) - saveOneKeyToWaitingList(args.UserAccountsDB, []byte("waitingPubKey"), args.Marshalizer, []byte("ownerKey"), []byte("ownerKey")) - addValidatorData(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(0), args.Marshalizer) + stakingcommon.AddStakingData(args.UserAccountsDB, + []byte("ownerKey"), + []byte("ownerKey"), + [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, + args.Marshalizer, + ) + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, [][]byte{[]byte("waitingPubKey")}, args.Marshalizer, []byte("ownerKey"), []byte("ownerKey")) + stakingcommon.AddValidatorData(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(0), 
args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) _, err = s.peerAccountsDB.GetExistingAccount([]byte("waitingPubKey")) assert.NotNil(t, err) - assert.Equal(t, 4, len(validatorInfos[0])) - for _, vInfo := range validatorInfos[0] { - assert.Equal(t, vInfo.List, string(common.LeavingList)) - peerAcc, _ := s.getPeerAccount(vInfo.PublicKey) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 4) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.Equal(t, vInfo.GetList(), string(common.LeavingList)) + peerAcc, _ := s.getPeerAccount(vInfo.GetPublicKey()) assert.Equal(t, peerAcc.GetList(), string(common.LeavingList)) } } @@ -1986,28 +1982,484 @@ func TestSystemSCProcessor_ProcessSystemSmartContractSwapJailedWithWaiting(t *te jailedAcc, _ := args.PeerAccountsDB.LoadAccount([]byte("jailedPubKey0")) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - vInfo := &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("jailedPubKey0"), ShardId: 0, List: string(common.JailedList), TempRating: 1, RewardAddress: []byte("address"), AccumulatedFees: big.NewInt(0), - } - validatorInfos[0] = append(validatorInfos[0], vInfo) - - vInfo1 := &state.ValidatorInfo{ + }) + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("waitingPubKey"), ShardId: 0, List: string(common.WaitingList), - } - validatorInfos[0] = append(validatorInfos[0], vInfo1) + }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - assert.Equal(t, 2, len(validatorInfos[0])) - newValidatorInfo := validatorInfos[0][0] - assert.Equal(t, newValidatorInfo.List, string(common.NewList)) 
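Most hunks in this file apply the same mechanical migration: the raw map[uint32][]*state.ValidatorInfo is replaced by the ShardValidatorsInfoMap handler, and direct field access by getters. A minimal before/after sketch of the pattern, using only calls visible in this diff (the entries here do not set ShardId, so they land in shard 0, as the tests' read-backs assume):

// before: per-shard map, entries appended and fields read directly
validatorInfos := make(map[uint32][]*state.ValidatorInfo)
validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{
    PublicKey: []byte("stakedPubKey0"),
    List:      string(common.EligibleList),
})
list := validatorInfos[0][0].List

// after: handler created once, entries added through Add, values read through getters
validatorsInfo := state.NewShardValidatorsInfoMap()
_ = validatorsInfo.Add(&state.ValidatorInfo{
    PublicKey: []byte("stakedPubKey0"),
    List:      string(common.EligibleList),
})
list = validatorsInfo.GetShardValidatorsInfoMap()[0][0].GetList()
_ = list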
+ require.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 2) + newValidatorInfo := validatorsInfo.GetShardValidatorsInfoMap()[0][0] + require.Equal(t, newValidatorInfo.GetList(), string(common.NewList)) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4Init(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + s, _ := NewSystemSCProcessor(args) + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner3 := []byte("owner3") + + owner1ListPubKeysWaiting := [][]byte{[]byte("waitingPubKe0"), []byte("waitingPubKe1"), []byte("waitingPubKe2")} + owner1ListPubKeysStaked := [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1")} + owner1AllPubKeys := append(owner1ListPubKeysWaiting, owner1ListPubKeysStaked...) + + owner2ListPubKeysWaiting := [][]byte{[]byte("waitingPubKe3"), []byte("waitingPubKe4")} + owner2ListPubKeysStaked := [][]byte{[]byte("stakedPubKey2")} + owner2AllPubKeys := append(owner2ListPubKeysWaiting, owner2ListPubKeysStaked...) + + owner3ListPubKeysWaiting := [][]byte{[]byte("waitingPubKe5"), []byte("waitingPubKe6")} + + // Owner1 has 2 staked nodes (one eligible, one waiting) in shard0 + 3 nodes in staking queue. + // It has enough stake so that all his staking queue nodes will be selected in the auction list + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, owner1ListPubKeysWaiting, args.Marshalizer, owner1, owner1) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner1, owner1, owner1AllPubKeys, big.NewInt(5000), args.Marshalizer) + + // Owner2 has 1 staked node (eligible) in shard1 + 2 nodes in staking queue. + // It has enough stake for only ONE node from staking queue to be selected in the auction list + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, owner2ListPubKeysWaiting, args.Marshalizer, owner2, owner2) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner2, owner2, owner2AllPubKeys, big.NewInt(2500), args.Marshalizer) + + // Owner3 has 0 staked node + 2 nodes in staking queue. 
+ // It has enough stake so that all his staking queue nodes will be selected in the auction list + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, owner3ListPubKeysWaiting, args.Marshalizer, owner3, owner3) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner3, owner3, owner3ListPubKeysWaiting, big.NewInt(2000), args.Marshalizer) + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1ListPubKeysStaked[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner1ListPubKeysStaked[1], common.WaitingList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner2ListPubKeysStaked[0], common.EligibleList, "", 1, owner2)) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step1EnableEpoch}) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1ListPubKeysStaked[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner1ListPubKeysStaked[1], common.WaitingList, "", 0, owner1), + }, + 1: { + createValidatorInfo(owner2ListPubKeysStaked[0], common.EligibleList, "", 1, owner2), + }, + } + + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4EnabledCannotPrepareStakingData(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + + errProcessStakingData := errors.New("error processing staking data") + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + PrepareStakingDataCalled: func(validatorsMap state.ShardValidatorsInfoMapHandler) error { + return errProcessStakingData + }, + } + + owner := []byte("owner") + ownerStakedKeys := [][]byte{[]byte("pubKey0"), []byte("pubKey1")} + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner, owner, ownerStakedKeys, big.NewInt(2000), args.Marshalizer) + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(ownerStakedKeys[0], common.AuctionList, "", 0, owner)) + _ = validatorsInfo.Add(createValidatorInfo(ownerStakedKeys[1], common.AuctionList, "", 0, owner)) + + s, _ := NewSystemSCProcessor(args) + s.EpochConfirmed(stakingV4Step2EnableEpoch, 0) + + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Equal(t, errProcessStakingData, err) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4Enabled(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(args.EpochNotifier, []config.MaxNodesChangeConfig{{MaxNumNodes: 9}}) + + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + Denomination: 0, + }) + + argsAuctionListSelector := AuctionListSelectorArgs{ + ShardCoordinator: args.ShardCoordinator, + StakingDataProvider: args.StakingDataProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + SoftAuctionConfig: 
auctionCfg, + AuctionListDisplayHandler: ald, + } + als, _ := NewAuctionListSelector(argsAuctionListSelector) + args.AuctionListSelector = als + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner3 := []byte("owner3") + owner4 := []byte("owner4") + owner5 := []byte("owner5") + owner6 := []byte("owner6") + owner7 := []byte("owner7") + + owner1StakedKeys := [][]byte{[]byte("pubKey0"), []byte("pubKey1"), []byte("pubKey2")} + owner2StakedKeys := [][]byte{[]byte("pubKey3"), []byte("pubKey4"), []byte("pubKey5")} + owner3StakedKeys := [][]byte{[]byte("pubKey6"), []byte("pubKey7")} + owner4StakedKeys := [][]byte{[]byte("pubKey8"), []byte("pubKey9"), []byte("pubKe10"), []byte("pubKe11")} + owner5StakedKeys := [][]byte{[]byte("pubKe12"), []byte("pubKe13")} + owner6StakedKeys := [][]byte{[]byte("pubKe14"), []byte("pubKe15")} + owner7StakedKeys := [][]byte{[]byte("pubKe16"), []byte("pubKe17")} + + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(6666), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner2, owner2, owner2StakedKeys, big.NewInt(5555), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner3, owner3, owner3StakedKeys, big.NewInt(4444), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner4, owner4, owner4StakedKeys, big.NewInt(6666), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner5, owner5, owner5StakedKeys, big.NewInt(1500), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner6, owner6, owner6StakedKeys, big.NewInt(1500), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner7, owner7, owner7StakedKeys, big.NewInt(1500), args.Marshalizer) + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[1], common.WaitingList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[2], common.AuctionList, "", 0, owner1)) + + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[0], common.EligibleList, "", 1, owner2)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[1], common.AuctionList, "", 1, owner2)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[2], common.AuctionList, "", 1, owner2)) + + _ = validatorsInfo.Add(createValidatorInfo(owner3StakedKeys[0], common.LeavingList, "", 1, owner3)) + _ = validatorsInfo.Add(createValidatorInfo(owner3StakedKeys[1], common.AuctionList, "", 1, owner3)) + + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[0], common.JailedList, "", 1, owner4)) + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[1], common.AuctionList, "", 1, owner4)) + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[2], common.AuctionList, "", 1, owner4)) + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[3], common.AuctionList, "", 1, owner4)) + + _ = validatorsInfo.Add(createValidatorInfo(owner5StakedKeys[0], common.EligibleList, "", 1, owner5)) + _ = validatorsInfo.Add(createValidatorInfo(owner5StakedKeys[1], common.AuctionList, "", 1, owner5)) + + _ = validatorsInfo.Add(createValidatorInfo(owner6StakedKeys[0], common.AuctionList, "", 1, owner6)) + _ = validatorsInfo.Add(createValidatorInfo(owner6StakedKeys[1], common.AuctionList, "", 1, owner6)) + + _ = 
validatorsInfo.Add(createValidatorInfo(owner7StakedKeys[0], common.EligibleList, "", 2, owner7))
+    _ = validatorsInfo.Add(createValidatorInfo(owner7StakedKeys[1], common.EligibleList, "", 2, owner7))
+
+    s, _ := NewSystemSCProcessor(args)
+    args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step2EnableEpoch})
+    err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{PrevRandSeed: []byte("pubKey7")})
+    require.Nil(t, err)
+
+    /*
+        - owner5 does not have enough stake for 2 nodes=> his auction node (pubKe13) will be unStaked at the end of the epoch =>
+          will not participate in auction selection
+        - owner6 does not have enough stake for 2 nodes => one of his auction nodes(pubKey14) will be unStaked at the end of the epoch =>
+          his other auction node(pubKey15) will not participate in auction selection
+        - MaxNumNodes = 9
+        - EligibleBlsKeys = 5 (pubKey0, pubKey1, pubKey3, pubKe13, pubKey17)
+        - QualifiedAuctionBlsKeys = 7 (pubKey2, pubKey4, pubKey5, pubKey7, pubKey9, pubKey10, pubKey11)
+        We can only select (MaxNumNodes - EligibleBlsKeys = 3) bls keys from AuctionList to be added to NewList
+
+        -> Initial nodes config in auction list is:
+        +--------+------------------+------------------+-------------------+--------------+-----------------+---------------------------+
+        | Owner  | Num staked nodes | Num active nodes | Num auction nodes | Total top up | Top up per node | Auction list nodes        |
+        +--------+------------------+------------------+-------------------+--------------+-----------------+---------------------------+
+        | owner3 | 2                | 1                | 1                 | 2444         | 1222            | pubKey7                   |
+        | owner4 | 4                | 1                | 3                 | 2666         | 666             | pubKey9, pubKe10, pubKe11 |
+        | owner1 | 3                | 2                | 1                 | 3666         | 1222            | pubKey2                   |
+        | owner2 | 3                | 1                | 2                 | 2555         | 851             | pubKey4, pubKey5          |
+        +--------+------------------+------------------+-------------------+--------------+-----------------+---------------------------+
+        -> Min possible topUp = 666; max possible topUp = 1333, min required topUp = 1216
+        -> Selected nodes config in auction list. For each owner's auction nodes, qualified ones are selected by sorting the bls keys
+        +--------+------------------+----------------+--------------+-------------------+-----------------------------+------------------+---------------------------+-----------------------------+
+        | Owner  | Num staked nodes | TopUp per node | Total top up | Num auction nodes | Num qualified auction nodes | Num active nodes | Qualified top up per node | Selected auction list nodes |
+        +--------+------------------+----------------+--------------+-------------------+-----------------------------+------------------+---------------------------+-----------------------------+
+        | owner1 | 3                | 1222           | 3666         | 1                 | 1                           | 2                | 1222                      | pubKey2                     |
+        | owner2 | 3                | 851            | 2555         | 2                 | 1                           | 1                | 1277                      | pubKey5                     |
+        | owner3 | 2                | 1222           | 2444         | 1                 | 1                           | 1                | 1222                      | pubKey7                     |
+        | owner4 | 4                | 666            | 2666         | 3                 | 1                           | 1                | 1333                      | pubKey9                     |
+        +--------+------------------+----------------+--------------+-------------------+-----------------------------+------------------+---------------------------+-----------------------------+
+        -> Final selected nodes from auction list
+        +--------+----------------+--------------------------+
+        | Owner  | Registered key | Qualified TopUp per node |
+        +--------+----------------+--------------------------+
+        | owner4 | pubKey9        | 1333                     |
+        | owner2 | pubKey5        | 1277                     |
+        | owner1 | pubKey2        | 1222                     |
+        +--------+----------------+--------------------------+
+        | owner3 | pubKey7        | 1222                     |
+        +--------+----------------+--------------------------+
+
+        The following have 1222 top up per node:
+        - owner1 with 1 bls key = pubKey2
+        - owner3 with 1 bls key = pubKey7
+
+        Since randomness = []byte("pubKey7"), nodes will be sorted based on blsKey XOR randomness, therefore:
+        - XOR1 = []byte("pubKey2") XOR []byte("pubKey7") = [0 0 0 0 0 0 5]
+        - XOR3 = []byte("pubKey7") XOR []byte("pubKey7") = [0 0 0 0 0 0 0]
+    */
+    requireTopUpPerNodes(t, s.stakingDataProvider, owner1StakedKeys, big.NewInt(1222))
+    requireTopUpPerNodes(t, s.stakingDataProvider, owner2StakedKeys, big.NewInt(851))
+    requireTopUpPerNodes(t, s.stakingDataProvider, owner3StakedKeys, big.NewInt(1222))
+    requireTopUpPerNodes(t, s.stakingDataProvider, owner4StakedKeys, big.NewInt(666))
+    requireTopUpPerNodes(t, s.stakingDataProvider, owner5StakedKeys, big.NewInt(0))
+    requireTopUpPerNodes(t, s.stakingDataProvider, owner6StakedKeys, big.NewInt(0))
+    requireTopUpPerNodes(t, s.stakingDataProvider, owner7StakedKeys, big.NewInt(0))
+
+    expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{
+        0: {
+            createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1),
+            createValidatorInfo(owner1StakedKeys[1], common.WaitingList, "", 0, owner1),
+            createValidatorInfo(owner1StakedKeys[2], common.SelectedFromAuctionList, common.AuctionList, 0, owner1),
+        },
+        1: {
+            createValidatorInfo(owner2StakedKeys[0], common.EligibleList, "", 1, owner2),
+            createValidatorInfo(owner2StakedKeys[1], common.AuctionList, "", 1, owner2),
+            createValidatorInfo(owner2StakedKeys[2], common.SelectedFromAuctionList, common.AuctionList, 1, owner2),
+
+            createValidatorInfo(owner3StakedKeys[0], common.LeavingList, "", 1, owner3),
+            createValidatorInfo(owner3StakedKeys[1], common.AuctionList, "", 1, owner3),
+
+            createValidatorInfo(owner4StakedKeys[0], common.JailedList, "", 1, owner4),
+            createValidatorInfo(owner4StakedKeys[1], common.SelectedFromAuctionList, common.AuctionList, 1, owner4),
+            createValidatorInfo(owner4StakedKeys[2], common.AuctionList, "", 1, owner4),
+
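Two of the calculations the comment above walks through, sketched in isolation. The division is exactly what the requireTopUpPerNodes helper defined later in this file asserts; the XOR ordering and its descending direction are inferred from the worked example, not taken from the auction selector's code:

// top-up per node: the owner's total top-up split across all of its staked keys
totalTopUp := big.NewInt(2444) // owner3's total top-up from the first table
stakedPubKeys := [][]byte{[]byte("pubKey6"), []byte("pubKey7")}
topUpPerNode := big.NewInt(0).Div(totalTopUp, big.NewInt(int64(len(stakedPubKeys)))) // 1222
_ = topUpPerNode

// tie-break for equal top-ups: order the keys by blsKey XOR randomness
sortByXorWithRandomness := func(keys [][]byte, randomness []byte) {
    xor := func(blsKey []byte) []byte {
        out := make([]byte, len(blsKey))
        for i := range blsKey {
            out[i] = blsKey[i] ^ randomness[i%len(randomness)]
        }
        return out
    }
    sort.SliceStable(keys, func(i, j int) bool {
        return bytes.Compare(xor(keys[i]), xor(keys[j])) > 0
    })
}
keys := [][]byte{[]byte("pubKey7"), []byte("pubKey2")}
sortByXorWithRandomness(keys, []byte("pubKey7")) // keys[0] is now "pubKey2", matching the selection above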
createValidatorInfo(owner4StakedKeys[3], common.AuctionList, "", 1, owner4), + + createValidatorInfo(owner5StakedKeys[0], common.EligibleList, "", 1, owner5), + createValidatorInfo(owner5StakedKeys[1], common.LeavingList, common.AuctionList, 1, owner5), + + createValidatorInfo(owner6StakedKeys[0], common.LeavingList, common.AuctionList, 1, owner6), + createValidatorInfo(owner6StakedKeys[1], common.AuctionList, "", 1, owner6), + }, + 2: { + createValidatorInfo(owner7StakedKeys[0], common.LeavingList, common.EligibleList, 2, owner7), + createValidatorInfo(owner7StakedKeys[1], common.EligibleList, "", 2, owner7), + }, + } + + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) +} + +func TestSystemSCProcessor_LegacyEpochConfirmedCorrectMaxNumNodesAfterNodeRestart(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + nodesConfigEpoch0 := config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + } + nodesConfigEpoch1 := config.MaxNodesChangeConfig{ + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + } + nodesConfigEpoch6 := config.MaxNodesChangeConfig{ + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 1, + } + nodesConfigProvider, _ := notifier.NewNodesConfigProvider( + args.EpochNotifier, + []config.MaxNodesChangeConfig{ + nodesConfigEpoch0, + nodesConfigEpoch1, + nodesConfigEpoch6, + }) + args.MaxNodesChangeConfigProvider = nodesConfigProvider + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV2Flag) + validatorsInfoMap := state.NewShardValidatorsInfoMap() + s, _ := NewSystemSCProcessor(args) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 0, Nonce: 0}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err := s.processLegacy(validatorsInfoMap, 0, 0) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch0.MaxNumNodes, s.maxNodes) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 1, Nonce: 1}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 1, 1) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch1.MaxNumNodes, s.maxNodes) + + for epoch := uint32(2); epoch <= 5; epoch++ { + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: epoch, Nonce: uint64(epoch)}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, uint64(epoch), epoch) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch1.MaxNumNodes, s.maxNodes) + } + + // simulate restart + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 0, Nonce: 0}) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 5, Nonce: 5}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 5, 5) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch1.MaxNumNodes, s.maxNodes) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 6, Nonce: 6}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 6, 6) + require.Equal(t, epochStart.ErrInvalidMaxNumberOfNodes, err) + + args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub).AddActiveFlags(common.StakingV4StartedFlag) + err = s.processLegacy(validatorsInfoMap, 6, 6) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) + + // simulate restart + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 0, Nonce: 0}) + 
args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 6, Nonce: 6}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 6, 6) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) + + for epoch := uint32(7); epoch <= 20; epoch++ { + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: epoch, Nonce: uint64(epoch)}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, uint64(epoch), epoch) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) + } + + // simulate restart + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 1, Nonce: 1}) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 21, Nonce: 21}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 21, 21) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractNilInputValues(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + s, _ := NewSystemSCProcessor(args) + + t.Run("nil validators info map, expect error", func(t *testing.T) { + t.Parallel() + + blockHeader := &block.Header{Nonce: 4} + err := s.ProcessSystemSmartContract(nil, blockHeader) + require.True(t, strings.Contains(err.Error(), errNilValidatorsInfoMap.Error())) + require.True(t, strings.Contains(err.Error(), fmt.Sprintf("%d", blockHeader.GetNonce()))) + }) + + t.Run("nil header, expect error", func(t *testing.T) { + t.Parallel() + + validatorsInfoMap := state.NewShardValidatorsInfoMap() + err := s.ProcessSystemSmartContract(validatorsInfoMap, nil) + require.Equal(t, process.ErrNilHeaderHandler, err) + }) +} + +func TestLegacySystemSCProcessor_addNewlyStakedNodesToValidatorTrie(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + sysProc, _ := NewSystemSCProcessor(args) + + pubKey := []byte("pubKey") + existingValidator := &state.ValidatorInfo{ + PublicKey: pubKey, + List: "inactive", + } + + nonce := uint64(4) + newList := common.AuctionList + newlyAddedValidator := &state.ValidatorInfo{ + PublicKey: pubKey, + List: string(newList), + Index: uint32(nonce), + TempRating: sysProc.startRating, + Rating: sysProc.startRating, + RewardAddress: pubKey, + AccumulatedFees: big.NewInt(0), + } + + // Check before stakingV4, we should have both validators + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(existingValidator) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step1EnableEpoch - 1, Nonce: 1}) + err := sysProc.addNewlyStakedNodesToValidatorTrie( + validatorsInfo, + [][]byte{pubKey, pubKey}, + nonce, + newList, + ) + require.Nil(t, err) + require.Equal(t, map[uint32][]state.ValidatorInfoHandler{ + 0: {existingValidator, newlyAddedValidator}, + }, validatorsInfo.GetShardValidatorsInfoMap()) + + // Check after stakingV4, we should only have the new one + validatorsInfo = state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(existingValidator) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step1EnableEpoch, Nonce: 1}) + err = sysProc.addNewlyStakedNodesToValidatorTrie( + validatorsInfo, + [][]byte{pubKey, pubKey}, + nonce, + newList, + ) + require.Nil(t, err) + require.Equal(t, map[uint32][]state.ValidatorInfoHandler{ + 0: {newlyAddedValidator}, + }, 
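The restart scenarios above reduce to one rule: after any sequence of epoch notifications, maxNodes must match the MaxNodesChangeConfig entry with the highest EpochEnable that does not exceed the current epoch. A standalone sketch of that selection (the helper is illustrative; the real nodes config provider keeps this state internally):

func activeMaxNodesConfig(configs []config.MaxNodesChangeConfig, epoch uint32) (config.MaxNodesChangeConfig, bool) {
    active := config.MaxNodesChangeConfig{}
    found := false
    for _, cfg := range configs {
        // keep the entry with the highest EpochEnable that is still <= the current epoch
        if cfg.EpochEnable <= epoch && (!found || cfg.EpochEnable > active.EpochEnable) {
            active = cfg
            found = true
        }
    }
    return active, found
}

With the three entries used above (EpochEnable 0, 1 and 6), epoch 5 resolves to the epoch-1 entry (56 nodes) and epoch 21 still resolves to the epoch-6 entry (48 nodes), which is what the assertions check.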
validatorsInfo.GetShardValidatorsInfoMap()) +} + +func requireTopUpPerNodes(t *testing.T, s epochStart.StakingDataProvider, stakedPubKeys [][]byte, topUp *big.Int) { + for _, pubKey := range stakedPubKeys { + owner, err := s.GetBlsKeyOwner(pubKey) + require.Nil(t, err) + + totalTopUp := s.GetOwnersData()[owner].TotalTopUp + topUpPerNode := big.NewInt(0).Div(totalTopUp, big.NewInt(int64(len(stakedPubKeys)))) + require.Equal(t, topUp, topUpPerNode) + } +} + +// This func sets rating and temp rating with the start rating value used in createFullArgumentsForSystemSCProcessing +func createValidatorInfo(pubKey []byte, list common.PeerType, previousList common.PeerType, shardID uint32, owner []byte) *state.ValidatorInfo { + rating := uint32(5) + + return &state.ValidatorInfo{ + PublicKey: pubKey, + List: string(list), + PreviousList: string(previousList), + ShardId: shardID, + RewardAddress: owner, + AccumulatedFees: zero, + Rating: rating, + TempRating: rating, + } } diff --git a/epochStart/metachain/tableDisplayer.go b/epochStart/metachain/tableDisplayer.go new file mode 100644 index 00000000000..275805489dc --- /dev/null +++ b/epochStart/metachain/tableDisplayer.go @@ -0,0 +1,32 @@ +package metachain + +import ( + "fmt" + + "github.com/multiversx/mx-chain-core-go/display" +) + +type tableDisplayer struct { +} + +// NewTableDisplayer will create a component able to display tables in logger +func NewTableDisplayer() *tableDisplayer { + return &tableDisplayer{} +} + +// DisplayTable will display a table in the log +func (tb *tableDisplayer) DisplayTable(tableHeader []string, lines []*display.LineData, message string) { + table, err := display.CreateTableString(tableHeader, lines) + if err != nil { + log.Error("could not create table", "tableHeader", tableHeader, "error", err) + return + } + + msg := fmt.Sprintf("%s\n%s", message, table) + log.Debug(msg) +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (tb *tableDisplayer) IsInterfaceNil() bool { + return tb == nil +} diff --git a/epochStart/metachain/validatorList.go b/epochStart/metachain/validatorList.go new file mode 100644 index 00000000000..75c38a1b3c2 --- /dev/null +++ b/epochStart/metachain/validatorList.go @@ -0,0 +1,27 @@ +package metachain + +import ( + "bytes" + + "github.com/multiversx/mx-chain-go/state" +) + +type validatorList []state.ValidatorInfoHandler + +// Len will return the length of the validatorList +func (v validatorList) Len() int { return len(v) } + +// Swap will interchange the objects on input indexes +func (v validatorList) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +// Less will return true if object on index i should appear before object in index j +// Sorting of validators should be by index and public key +func (v validatorList) Less(i, j int) bool { + if v[i].GetTempRating() == v[j].GetTempRating() { + if v[i].GetIndex() == v[j].GetIndex() { + return bytes.Compare(v[i].GetPublicKey(), v[j].GetPublicKey()) < 0 + } + return v[i].GetIndex() < v[j].GetIndex() + } + return v[i].GetTempRating() < v[j].GetTempRating() +} diff --git a/epochStart/metachain/validators.go b/epochStart/metachain/validators.go index 081944230db..e8eff547a09 100644 --- a/epochStart/metachain/validators.go +++ b/epochStart/metachain/validators.go @@ -93,7 +93,7 @@ func NewValidatorInfoCreator(args ArgsNewValidatorInfoCreator) (*validatorInfoCr } // CreateValidatorInfoMiniBlocks creates the validatorInfo mini blocks according to the provided validatorInfo map -func (vic 
*validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { +func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { if validatorsInfo == nil { return nil, epochStart.ErrNilValidatorInfo } @@ -102,8 +102,9 @@ func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo ma miniBlocks := make([]*block.MiniBlock, 0) + validatorsMap := validatorsInfo.GetShardValidatorsInfoMap() for shardId := uint32(0); shardId < vic.shardCoordinator.NumberOfShards(); shardId++ { - validators := validatorsInfo[shardId] + validators := validatorsMap[shardId] if len(validators) == 0 { continue } @@ -116,7 +117,7 @@ func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo ma miniBlocks = append(miniBlocks, miniBlock) } - validators := validatorsInfo[core.MetachainShardId] + validators := validatorsMap[core.MetachainShardId] if len(validators) == 0 { return miniBlocks, nil } @@ -131,19 +132,19 @@ func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo ma return miniBlocks, nil } -func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []*state.ValidatorInfo) (*block.MiniBlock, error) { +func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []state.ValidatorInfoHandler) (*block.MiniBlock, error) { miniBlock := &block.MiniBlock{} miniBlock.SenderShardID = vic.shardCoordinator.SelfId() miniBlock.ReceiverShardID = core.AllShardId miniBlock.TxHashes = make([][]byte, len(validatorsInfo)) miniBlock.Type = block.PeerBlock - validatorsCopy := make([]*state.ValidatorInfo, len(validatorsInfo)) - copy(validatorsCopy, validatorsInfo) + validatorCopy := make([]state.ValidatorInfoHandler, len(validatorsInfo)) + copy(validatorCopy, validatorsInfo) - vic.sortValidators(validatorsCopy) + vic.sortValidators(validatorCopy) - for index, validator := range validatorsCopy { + for index, validator := range validatorCopy { shardValidatorInfo := createShardValidatorInfo(validator) shardValidatorInfoData, err := vic.getShardValidatorInfoData(shardValidatorInfo) @@ -157,7 +158,7 @@ func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []*state.Validat return miniBlock, nil } -func (vic *validatorInfoCreator) sortValidators(validators []*state.ValidatorInfo) { +func (vic *validatorInfoCreator) sortValidators(validators []state.ValidatorInfoHandler) { if vic.enableEpochsHandler.IsFlagEnabled(common.DeterministicSortOnValidatorsInfoFixFlag) { vic.deterministicSortValidators(validators) return @@ -166,9 +167,9 @@ func (vic *validatorInfoCreator) sortValidators(validators []*state.ValidatorInf vic.legacySortValidators(validators) } -func (vic *validatorInfoCreator) deterministicSortValidators(validators []*state.ValidatorInfo) { +func (vic *validatorInfoCreator) deterministicSortValidators(validators []state.ValidatorInfoHandler) { sort.SliceStable(validators, func(a, b int) bool { - result := bytes.Compare(validators[a].PublicKey, validators[b].PublicKey) + result := bytes.Compare(validators[a].GetPublicKey(), validators[b].GetPublicKey()) if result != 0 { return result < 0 } @@ -177,7 +178,8 @@ func (vic *validatorInfoCreator) deterministicSortValidators(validators []*state bValidatorString := validators[b].GoString() // possible issues as we have 2 entries with the same public key. 
Print & assure deterministic sorting log.Warn("found 2 entries in validatorInfoCreator.deterministicSortValidators with the same public key", - "validator a", aValidatorString, "validator b", bValidatorString) + "validator a", aValidatorString, "validator b", bValidatorString, + "validator a pub key", validators[a].GetPublicKey(), "validator b pub key", validators[b].GetPublicKey()) // since the GoString will include all fields, we do not need to marshal the struct again. Strings comparison will // suffice in this case. @@ -185,12 +187,12 @@ func (vic *validatorInfoCreator) deterministicSortValidators(validators []*state }) } -func (vic *validatorInfoCreator) legacySortValidators(validators []*state.ValidatorInfo) { +func (vic *validatorInfoCreator) legacySortValidators(validators []state.ValidatorInfoHandler) { swap := func(a, b int) { validators[a], validators[b] = validators[b], validators[a] } less := func(a, b int) bool { - return bytes.Compare(validators[a].PublicKey, validators[b].PublicKey) < 0 + return bytes.Compare(validators[a].GetPublicKey(), validators[b].GetPublicKey()) < 0 } compatibility.SortSlice(swap, less, len(validators)) } @@ -219,18 +221,23 @@ func (vic *validatorInfoCreator) getShardValidatorInfoHash(shardValidatorInfo *s return shardValidatorInfoHash, nil } -func createShardValidatorInfo(validator *state.ValidatorInfo) *state.ShardValidatorInfo { +func createShardValidatorInfo(validator state.ValidatorInfoHandler) *state.ShardValidatorInfo { return &state.ShardValidatorInfo{ - PublicKey: validator.PublicKey, - ShardId: validator.ShardId, - List: validator.List, - Index: validator.Index, - TempRating: validator.TempRating, + PublicKey: validator.GetPublicKey(), + ShardId: validator.GetShardId(), + List: validator.GetList(), + PreviousList: validator.GetPreviousList(), + Index: validator.GetIndex(), + PreviousIndex: validator.GetPreviousIndex(), + TempRating: validator.GetTempRating(), } } // VerifyValidatorInfoMiniBlocks verifies if received validator info mini blocks are correct -func (vic *validatorInfoCreator) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error { +func (vic *validatorInfoCreator) VerifyValidatorInfoMiniBlocks( + miniBlocks []*block.MiniBlock, + validatorsInfo state.ShardValidatorsInfoMapHandler, +) error { if len(miniBlocks) == 0 { return epochStart.ErrNilMiniblocks } diff --git a/epochStart/metachain/validators_test.go b/epochStart/metachain/validators_test.go index 9589943162f..662b0192044 100644 --- a/epochStart/metachain/validators_test.go +++ b/epochStart/metachain/validators_test.go @@ -30,90 +30,90 @@ import ( "github.com/stretchr/testify/require" ) -func createMockValidatorInfo() map[uint32][]*state.ValidatorInfo { - validatorInfo := map[uint32][]*state.ValidatorInfo{ - 0: { - &state.ValidatorInfo{ - PublicKey: []byte("a1"), - ShardId: 0, - List: "eligible", - Index: 1, - TempRating: 100, - Rating: 1000, - RewardAddress: []byte("rewardA1"), - LeaderSuccess: 1, - LeaderFailure: 2, - ValidatorSuccess: 3, - ValidatorFailure: 4, - TotalLeaderSuccess: 10, - TotalLeaderFailure: 20, - TotalValidatorSuccess: 30, - TotalValidatorFailure: 40, - NumSelectedInSuccessBlocks: 5, - AccumulatedFees: big.NewInt(100), - }, - &state.ValidatorInfo{ - PublicKey: []byte("a2"), - ShardId: 0, - List: "waiting", - Index: 2, - TempRating: 101, - Rating: 1001, - RewardAddress: []byte("rewardA2"), - LeaderSuccess: 6, - LeaderFailure: 7, - ValidatorSuccess: 8, - ValidatorFailure: 9, - 
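The new validatorList type added earlier in this patch (epochStart/metachain/validatorList.go) implements sort.Interface so a slice of ValidatorInfoHandler can be ordered by temp rating, then index, then public key. A short usage sketch with made-up entries:

validators := []state.ValidatorInfoHandler{
    &state.ValidatorInfo{PublicKey: []byte("b"), TempRating: 10, Index: 2},
    &state.ValidatorInfo{PublicKey: []byte("a"), TempRating: 10, Index: 2},
    &state.ValidatorInfo{PublicKey: []byte("c"), TempRating: 5, Index: 1},
}

sort.Sort(validatorList(validators))
// resulting order: "c" (lowest temp rating), then "a" and "b" (equal rating and index, ordered by public key bytes)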
TotalLeaderSuccess: 60, - TotalLeaderFailure: 70, - TotalValidatorSuccess: 80, - TotalValidatorFailure: 90, - NumSelectedInSuccessBlocks: 10, - AccumulatedFees: big.NewInt(101), - }, - }, - core.MetachainShardId: { - &state.ValidatorInfo{ - PublicKey: []byte("m1"), - ShardId: core.MetachainShardId, - List: "eligible", - Index: 1, - TempRating: 100, - Rating: 1000, - RewardAddress: []byte("rewardM1"), - LeaderSuccess: 1, - LeaderFailure: 2, - ValidatorSuccess: 3, - ValidatorFailure: 4, - TotalLeaderSuccess: 10, - TotalLeaderFailure: 20, - TotalValidatorSuccess: 30, - TotalValidatorFailure: 40, - NumSelectedInSuccessBlocks: 5, - AccumulatedFees: big.NewInt(100), - }, - &state.ValidatorInfo{ - PublicKey: []byte("m0"), - ShardId: core.MetachainShardId, - List: "waiting", - Index: 2, - TempRating: 101, - Rating: 1001, - RewardAddress: []byte("rewardM2"), - LeaderSuccess: 6, - LeaderFailure: 7, - ValidatorSuccess: 8, - ValidatorFailure: 9, - TotalLeaderSuccess: 60, - TotalLeaderFailure: 70, - TotalValidatorSuccess: 80, - TotalValidatorFailure: 90, - NumSelectedInSuccessBlocks: 10, - AccumulatedFees: big.NewInt(101), - }, - }, - } - return validatorInfo +func createMockValidatorInfo() state.ShardValidatorsInfoMapHandler { + validatorsInfo := state.NewShardValidatorsInfoMap() + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("a1"), + ShardId: 0, + List: "eligible", + Index: 1, + TempRating: 100, + Rating: 1000, + RewardAddress: []byte("rewardA1"), + LeaderSuccess: 1, + LeaderFailure: 2, + ValidatorSuccess: 3, + ValidatorFailure: 4, + TotalLeaderSuccess: 10, + TotalLeaderFailure: 20, + TotalValidatorSuccess: 30, + TotalValidatorFailure: 40, + NumSelectedInSuccessBlocks: 5, + AccumulatedFees: big.NewInt(100), + }) + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("a2"), + ShardId: 0, + List: "waiting", + Index: 2, + TempRating: 101, + Rating: 1001, + RewardAddress: []byte("rewardA2"), + LeaderSuccess: 6, + LeaderFailure: 7, + ValidatorSuccess: 8, + ValidatorFailure: 9, + TotalLeaderSuccess: 60, + TotalLeaderFailure: 70, + TotalValidatorSuccess: 80, + TotalValidatorFailure: 90, + NumSelectedInSuccessBlocks: 10, + AccumulatedFees: big.NewInt(101), + }) + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("m1"), + ShardId: core.MetachainShardId, + List: "eligible", + Index: 1, + TempRating: 100, + Rating: 1000, + RewardAddress: []byte("rewardM1"), + LeaderSuccess: 1, + LeaderFailure: 2, + ValidatorSuccess: 3, + ValidatorFailure: 4, + TotalLeaderSuccess: 10, + TotalLeaderFailure: 20, + TotalValidatorSuccess: 30, + TotalValidatorFailure: 40, + NumSelectedInSuccessBlocks: 5, + AccumulatedFees: big.NewInt(100), + }) + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("m0"), + ShardId: core.MetachainShardId, + List: "waiting", + Index: 2, + TempRating: 101, + Rating: 1001, + RewardAddress: []byte("rewardM2"), + LeaderSuccess: 6, + LeaderFailure: 7, + ValidatorSuccess: 8, + ValidatorFailure: 9, + TotalLeaderSuccess: 60, + TotalLeaderFailure: 70, + TotalValidatorSuccess: 80, + TotalValidatorFailure: 90, + NumSelectedInSuccessBlocks: 10, + AccumulatedFees: big.NewInt(101), + }) + + return validatorsInfo } func createMockEpochValidatorInfoCreatorsArguments() ArgsNewValidatorInfoCreator { @@ -145,7 +145,7 @@ func createMockEpochValidatorInfoCreatorsArguments() ArgsNewValidatorInfoCreator return argsNewEpochEconomics } -func verifyMiniBlocks(bl *block.MiniBlock, infos []*state.ValidatorInfo, marshalledShardValidatorsInfo [][]byte, marshalizer 
marshal.Marshalizer) bool { +func verifyMiniBlocks(bl *block.MiniBlock, infos []state.ValidatorInfoHandler, marshalledShardValidatorsInfo [][]byte, marshalizer marshal.Marshalizer) bool { if bl.SenderShardID != core.MetachainShardId || bl.ReceiverShardID != core.AllShardId || len(bl.TxHashes) == 0 || @@ -153,10 +153,10 @@ func verifyMiniBlocks(bl *block.MiniBlock, infos []*state.ValidatorInfo, marshal return false } - validatorCopy := make([]*state.ValidatorInfo, len(infos)) + validatorCopy := make([]state.ValidatorInfoHandler, len(infos)) copy(validatorCopy, infos) sort.Slice(validatorCopy, func(a, b int) bool { - return bytes.Compare(validatorCopy[a].PublicKey, validatorCopy[b].PublicKey) < 0 + return bytes.Compare(validatorCopy[a].GetPublicKey(), validatorCopy[b].GetPublicKey()) < 0 }) for i, marshalledShardValidatorInfo := range marshalledShardValidatorsInfo { @@ -304,22 +304,22 @@ func TestEpochValidatorInfoCreator_CreateValidatorInfoMiniBlocksShouldBeCorrect( vic, _ := NewValidatorInfoCreator(arguments) mbs, _ := vic.CreateValidatorInfoMiniBlocks(validatorInfo) - shardValidatorInfo := make([]*state.ShardValidatorInfo, len(validatorInfo[0])) - marshalledShardValidatorInfo := make([][]byte, len(validatorInfo[0])) - for i := 0; i < len(validatorInfo[0]); i++ { - shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo[0][i]) + shardValidatorInfo := make([]*state.ShardValidatorInfo, len(validatorInfo.GetShardValidatorsInfoMap()[0])) + marshalledShardValidatorInfo := make([][]byte, len(validatorInfo.GetShardValidatorsInfoMap()[0])) + for i := 0; i < len(validatorInfo.GetShardValidatorsInfoMap()[0]); i++ { + shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo.GetShardValidatorsInfoMap()[0][i]) marshalledShardValidatorInfo[i], _ = arguments.Marshalizer.Marshal(shardValidatorInfo[i]) } - correctMB0 := verifyMiniBlocks(mbs[0], validatorInfo[0], marshalledShardValidatorInfo, arguments.Marshalizer) + correctMB0 := verifyMiniBlocks(mbs[0], validatorInfo.GetShardValidatorsInfoMap()[0], marshalledShardValidatorInfo, arguments.Marshalizer) require.True(t, correctMB0) - shardValidatorInfo = make([]*state.ShardValidatorInfo, len(validatorInfo[core.MetachainShardId])) - marshalledShardValidatorInfo = make([][]byte, len(validatorInfo[core.MetachainShardId])) - for i := 0; i < len(validatorInfo[core.MetachainShardId]); i++ { - shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo[core.MetachainShardId][i]) + shardValidatorInfo = make([]*state.ShardValidatorInfo, len(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId])) + marshalledShardValidatorInfo = make([][]byte, len(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId])) + for i := 0; i < len(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId]); i++ { + shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId][i]) marshalledShardValidatorInfo[i], _ = arguments.Marshalizer.Marshal(shardValidatorInfo[i]) } - correctMbMeta := verifyMiniBlocks(mbs[1], validatorInfo[core.MetachainShardId], marshalledShardValidatorInfo, arguments.Marshalizer) + correctMbMeta := verifyMiniBlocks(mbs[1], validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId], marshalledShardValidatorInfo, arguments.Marshalizer) require.True(t, correctMbMeta) } @@ -398,11 +398,11 @@ func TestEpochValidatorInfoCreator_VerifyValidatorInfoMiniBlocksNilOneMiniblock( } func createValidatorInfoMiniBlocks( - validatorInfo map[uint32][]*state.ValidatorInfo, + validatorInfo 
state.ShardValidatorsInfoMapHandler, arguments ArgsNewValidatorInfoCreator, ) []*block.MiniBlock { miniblocks := make([]*block.MiniBlock, 0) - for _, validators := range validatorInfo { + for _, validators := range validatorInfo.GetShardValidatorsInfoMap() { if len(validators) == 0 { continue } @@ -413,10 +413,10 @@ func createValidatorInfoMiniBlocks( miniBlock.TxHashes = make([][]byte, len(validators)) miniBlock.Type = block.PeerBlock - validatorCopy := make([]*state.ValidatorInfo, len(validators)) + validatorCopy := make([]state.ValidatorInfoHandler, len(validators)) copy(validatorCopy, validators) sort.Slice(validatorCopy, func(a, b int) bool { - return bytes.Compare(validatorCopy[a].PublicKey, validatorCopy[b].PublicKey) < 0 + return bytes.Compare(validatorCopy[a].GetPublicKey(), validatorCopy[b].GetPublicKey()) < 0 }) for index, validator := range validatorCopy { @@ -1129,7 +1129,7 @@ func testCreateMiniblockBackwardsCompatibility(t *testing.T, deterministFixEnabl require.Equal(t, len(input), len(expected)) - validators := make([]*state.ValidatorInfo, 0, len(input)) + validators := state.NewShardValidatorsInfoMap() marshaller := &marshal.GogoProtoMarshalizer{} for _, marshalledData := range input { vinfo := &state.ValidatorInfo{} @@ -1139,7 +1139,8 @@ func testCreateMiniblockBackwardsCompatibility(t *testing.T, deterministFixEnabl err = marshaller.Unmarshal(vinfo, buffMarshalledData) require.Nil(t, err) - validators = append(validators, vinfo) + err = validators.Add(vinfo) + require.Nil(t, err) } arguments := createMockEpochValidatorInfoCreatorsArguments() @@ -1157,7 +1158,7 @@ func testCreateMiniblockBackwardsCompatibility(t *testing.T, deterministFixEnabl arguments.ValidatorInfoStorage = storer vic, _ := NewValidatorInfoCreator(arguments) - mb, err := vic.createMiniBlock(validators) + mb, err := vic.createMiniBlock(validators.GetAllValidatorsInfo()) require.Nil(t, err) // test all generated miniblock's "txhashes" are the same with the expected ones @@ -1274,7 +1275,7 @@ func TestValidatorInfoCreator_sortValidators(t *testing.T) { } vic, _ := NewValidatorInfoCreator(arguments) - list := []*state.ValidatorInfo{thirdValidator, secondValidator, firstValidator} + list := []state.ValidatorInfoHandler{thirdValidator, secondValidator, firstValidator} vic.sortValidators(list) assert.Equal(t, list[0], secondValidator) // order not changed for the ones with same public key @@ -1292,7 +1293,7 @@ func TestValidatorInfoCreator_sortValidators(t *testing.T) { } vic, _ := NewValidatorInfoCreator(arguments) - list := []*state.ValidatorInfo{thirdValidator, secondValidator, firstValidator} + list := []state.ValidatorInfoHandler{thirdValidator, secondValidator, firstValidator} vic.sortValidators(list) assert.Equal(t, list[0], firstValidator) // proper sorting diff --git a/epochStart/mock/builtInCostHandlerStub.go b/epochStart/mock/builtInCostHandlerStub.go deleted file mode 100644 index 4ee3b23b062..00000000000 --- a/epochStart/mock/builtInCostHandlerStub.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { -} - -// ComputeBuiltInCost - -func (b *BuiltInCostHandlerStub) ComputeBuiltInCost(_ data.TransactionWithFeeHandler) uint64 { - return 1 -} - -// IsBuiltInFuncCall - -func (b *BuiltInCostHandlerStub) IsBuiltInFuncCall(_ data.TransactionWithFeeHandler) bool { - return false -} - -// IsInterfaceNil - -func (b *BuiltInCostHandlerStub) IsInterfaceNil() bool { - 
return b == nil -} diff --git a/epochStart/mock/nodesSetupStub.go b/epochStart/mock/nodesSetupStub.go deleted file mode 100644 index 8a79c4330cb..00000000000 --- a/epochStart/mock/nodesSetupStub.go +++ /dev/null @@ -1,191 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" -) - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - GetAdaptivityCalled func() bool - GetHysteresisCalled func() float32 - GetShardIDForPubKeyCalled func(pubkey []byte) (uint32, error) - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - InitialNodesPubKeysCalled func() map[uint32][]string - MinNumberOfMetaNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - MinNumberOfNodesWithHysteresisCalled func() uint32 - MinShardHysteresisNodesCalled func() uint32 - MinMetaHysteresisNodesCalled func() uint32 -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 1 -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 0 -} - -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 0 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 0 -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 0 -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - return nil, nil -} - -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() - } - return nil -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - - 
return false -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - - return 0 -} - -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) - } - return 0, nil -} - -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) - } - - return []string{"val1", "val2"}, nil -} - -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() - } - - return map[uint32][]string{0: {"val1", "val2"}} -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - - return 1 -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - - return 1 -} - -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() - } - return n.MinNumberOfNodes() -} - -// MinShardHysteresisNodes - -func (n *NodesSetupStub) MinShardHysteresisNodes() uint32 { - if n.MinShardHysteresisNodesCalled != nil { - return n.MinShardHysteresisNodesCalled() - } - return 1 -} - -// MinMetaHysteresisNodes - -func (n *NodesSetupStub) MinMetaHysteresisNodes() uint32 { - if n.MinMetaHysteresisNodesCalled != nil { - return n.MinMetaHysteresisNodesCalled() - } - return 1 -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/epochStart/mock/stakingDataProviderStub.go b/epochStart/mock/stakingDataProviderStub.go deleted file mode 100644 index a2cab61586b..00000000000 --- a/epochStart/mock/stakingDataProviderStub.go +++ /dev/null @@ -1,78 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/multiversx/mx-chain-go/state" -) - -// StakingDataProviderStub - -type StakingDataProviderStub struct { - CleanCalled func() - PrepareStakingDataCalled func(keys map[uint32][][]byte) error - GetTotalStakeEligibleNodesCalled func() *big.Int - GetTotalTopUpStakeEligibleNodesCalled func() *big.Int - GetNodeStakedTopUpCalled func(blsKey []byte) (*big.Int, error) - FillValidatorInfoCalled func(blsKey []byte) error - ComputeUnQualifiedNodesCalled func(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) -} - -// FillValidatorInfo - -func (sdps *StakingDataProviderStub) FillValidatorInfo(blsKey []byte) error { - if sdps.FillValidatorInfoCalled != nil { - return sdps.FillValidatorInfoCalled(blsKey) - } - return nil -} - -// ComputeUnQualifiedNodes - -func (sdps *StakingDataProviderStub) ComputeUnQualifiedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) { - if sdps.ComputeUnQualifiedNodesCalled != nil { - return sdps.ComputeUnQualifiedNodesCalled(validatorInfos) - } - return nil, nil, nil -} - -// GetTotalStakeEligibleNodes - -func (sdps *StakingDataProviderStub) 
GetTotalStakeEligibleNodes() *big.Int { - if sdps.GetTotalStakeEligibleNodesCalled != nil { - return sdps.GetTotalStakeEligibleNodesCalled() - } - return big.NewInt(0) -} - -// GetTotalTopUpStakeEligibleNodes - -func (sdps *StakingDataProviderStub) GetTotalTopUpStakeEligibleNodes() *big.Int { - if sdps.GetTotalTopUpStakeEligibleNodesCalled != nil { - return sdps.GetTotalTopUpStakeEligibleNodesCalled() - } - return big.NewInt(0) -} - -// GetNodeStakedTopUp - -func (sdps *StakingDataProviderStub) GetNodeStakedTopUp(blsKey []byte) (*big.Int, error) { - if sdps.GetNodeStakedTopUpCalled != nil { - return sdps.GetNodeStakedTopUpCalled(blsKey) - } - return big.NewInt(0), nil -} - -// PrepareStakingDataForRewards - -func (sdps *StakingDataProviderStub) PrepareStakingDataForRewards(keys map[uint32][][]byte) error { - if sdps.PrepareStakingDataCalled != nil { - return sdps.PrepareStakingDataCalled(keys) - } - return nil -} - -// Clean - -func (sdps *StakingDataProviderStub) Clean() { - if sdps.CleanCalled != nil { - sdps.CleanCalled() - } -} - -// IsInterfaceNil - -func (sdps *StakingDataProviderStub) IsInterfaceNil() bool { - return sdps == nil -} diff --git a/epochStart/mock/validatorStatisticsProcessorStub.go b/epochStart/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 4bb574a5ba5..00000000000 --- a/epochStart/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,38 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - IsInterfaceNilCalled func() bool -} - -// Process - -func (pm *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if pm.ProcessCalled != nil { - return pm.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (pm *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if pm.CommitCalled != nil { - return pm.CommitCalled() - } - - return nil, nil -} - -// IsInterfaceNil - -func (pm *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - if pm.IsInterfaceNilCalled != nil { - return pm.IsInterfaceNilCalled() - } - return false -} diff --git a/epochStart/notifier/errors.go b/epochStart/notifier/errors.go new file mode 100644 index 00000000000..eba24016fa1 --- /dev/null +++ b/epochStart/notifier/errors.go @@ -0,0 +1,5 @@ +package notifier + +import "errors" + +var errNoMaxNodesConfigChangeForStakingV4 = errors.New("no MaxNodesChangeEnableEpoch config found for EpochEnable = StakingV4Step3EnableEpoch") diff --git a/epochStart/notifier/nodesConfigProvider.go b/epochStart/notifier/nodesConfigProvider.go new file mode 100644 index 00000000000..273f750ae44 --- /dev/null +++ b/epochStart/notifier/nodesConfigProvider.go @@ -0,0 +1,82 @@ +package notifier + +import ( + "sort" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/process" +) + +type nodesConfigProvider struct { + mutex sync.RWMutex + currentEpoch uint32 + currentNodesConfig config.MaxNodesChangeConfig + allNodesConfigs []config.MaxNodesChangeConfig +} + +// NewNodesConfigProvider returns a new instance of nodesConfigProvider, which provides the current +// 
config.MaxNodesChangeConfig based on the current epoch +func NewNodesConfigProvider( + epochNotifier process.EpochNotifier, + maxNodesEnableConfig []config.MaxNodesChangeConfig, +) (*nodesConfigProvider, error) { + if check.IfNil(epochNotifier) { + return nil, epochStart.ErrNilEpochNotifier + } + + ncp := &nodesConfigProvider{ + allNodesConfigs: make([]config.MaxNodesChangeConfig, len(maxNodesEnableConfig)), + } + copy(ncp.allNodesConfigs, maxNodesEnableConfig) + ncp.sortConfigs() + epochNotifier.RegisterNotifyHandler(ncp) + + return ncp, nil +} + +func (ncp *nodesConfigProvider) sortConfigs() { + ncp.mutex.Lock() + defer ncp.mutex.Unlock() + + sort.Slice(ncp.allNodesConfigs, func(i, j int) bool { + return ncp.allNodesConfigs[i].EpochEnable < ncp.allNodesConfigs[j].EpochEnable + }) +} + +// GetAllNodesConfig returns all config.MaxNodesChangeConfig +func (ncp *nodesConfigProvider) GetAllNodesConfig() []config.MaxNodesChangeConfig { + ncp.mutex.RLock() + defer ncp.mutex.RUnlock() + + return ncp.allNodesConfigs +} + +// GetCurrentNodesConfig returns the current config.MaxNodesChangeConfig, based on epoch +func (ncp *nodesConfigProvider) GetCurrentNodesConfig() config.MaxNodesChangeConfig { + ncp.mutex.RLock() + defer ncp.mutex.RUnlock() + + return ncp.currentNodesConfig +} + +// EpochConfirmed is called whenever a new epoch is confirmed +func (ncp *nodesConfigProvider) EpochConfirmed(epoch uint32, _ uint64) { + ncp.mutex.Lock() + defer ncp.mutex.Unlock() + + for _, maxNodesConfig := range ncp.allNodesConfigs { + if epoch >= maxNodesConfig.EpochEnable { + ncp.currentNodesConfig = maxNodesConfig + } + } + + ncp.currentEpoch = epoch +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ncp *nodesConfigProvider) IsInterfaceNil() bool { + return ncp == nil +} diff --git a/epochStart/notifier/nodesConfigProviderAPI.go b/epochStart/notifier/nodesConfigProviderAPI.go new file mode 100644 index 00000000000..3db0d028ece --- /dev/null +++ b/epochStart/notifier/nodesConfigProviderAPI.go @@ -0,0 +1,71 @@ +package notifier + +import ( + "fmt" + + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" +) + +type nodesConfigProviderAPI struct { + *nodesConfigProvider + stakingV4Step2Epoch uint32 + stakingV4Step3MaxNodesConfig config.MaxNodesChangeConfig +} + +// NewNodesConfigProviderAPI returns a new instance of nodes config provider for API calls only, which provides the current +// max nodes change config based on the current epoch +func NewNodesConfigProviderAPI( + epochNotifier process.EpochNotifier, + cfg config.EnableEpochs, +) (*nodesConfigProviderAPI, error) { + nodesCfgProvider, err := NewNodesConfigProvider(epochNotifier, cfg.MaxNodesChangeEnableEpoch) + if err != nil { + return nil, err + } + + stakingV4Step3MaxNodesConfig, err := getStakingV4Step3MaxNodesConfig(nodesCfgProvider.allNodesConfigs, cfg.StakingV4Step3EnableEpoch) + if err != nil { + return nil, err + } + + return &nodesConfigProviderAPI{ + nodesConfigProvider: nodesCfgProvider, + stakingV4Step2Epoch: cfg.StakingV4Step2EnableEpoch, + stakingV4Step3MaxNodesConfig: stakingV4Step3MaxNodesConfig, + }, nil +} + +func getStakingV4Step3MaxNodesConfig( + allNodesConfigs []config.MaxNodesChangeConfig, + stakingV4Step3EnableEpoch uint32, +) (config.MaxNodesChangeConfig, error) { + for _, cfg := range allNodesConfigs { + if cfg.EpochEnable == stakingV4Step3EnableEpoch { + return cfg, nil + } + } + + return config.MaxNodesChangeConfig{}, fmt.Errorf("%w when 
creating api nodes config provider", errNoMaxNodesConfigChangeForStakingV4) +} + +// GetCurrentNodesConfig retrieves the current configuration of nodes. However, when invoked during epoch stakingV4 step 2 +// through API calls, it will provide the nodes configuration as it will appear in epoch stakingV4 step 3. This adjustment +// is made because, with the transition to step 3 at the epoch change, the maximum number of nodes will be reduced. +// Therefore, calling this API during step 2 aims to offer a preview of the upcoming epoch, accurately reflecting the +// adjusted number of nodes that will qualify from the auction. +func (ncp *nodesConfigProviderAPI) GetCurrentNodesConfig() config.MaxNodesChangeConfig { + ncp.mutex.RLock() + defer ncp.mutex.RUnlock() + + if ncp.currentEpoch == ncp.stakingV4Step2Epoch { + return ncp.stakingV4Step3MaxNodesConfig + } + + return ncp.currentNodesConfig +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ncp *nodesConfigProviderAPI) IsInterfaceNil() bool { + return ncp == nil +} diff --git a/epochStart/notifier/nodesConfigProviderAPI_test.go b/epochStart/notifier/nodesConfigProviderAPI_test.go new file mode 100644 index 00000000000..5438d533741 --- /dev/null +++ b/epochStart/notifier/nodesConfigProviderAPI_test.go @@ -0,0 +1,95 @@ +package notifier + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" + epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/stretchr/testify/require" +) + +func getEnableEpochCfg() config.EnableEpochs { + return config.EnableEpochs{ + StakingV4Step1EnableEpoch: 2, + StakingV4Step2EnableEpoch: 3, + StakingV4Step3EnableEpoch: 4, + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 64, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 4, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + }, + } +} + +func TestNewNodesConfigProviderAPI(t *testing.T) { + t.Parallel() + + t.Run("nil epoch notifier, should return error", func(t *testing.T) { + ncp, err := NewNodesConfigProviderAPI(nil, config.EnableEpochs{}) + require.Equal(t, process.ErrNilEpochNotifier, err) + require.Nil(t, ncp) + }) + + t.Run("no nodes config for staking v4 step 3, should return error", func(t *testing.T) { + ncp, err := NewNodesConfigProviderAPI(&epochNotifierMock.EpochNotifierStub{}, config.EnableEpochs{}) + require.ErrorIs(t, err, errNoMaxNodesConfigChangeForStakingV4) + require.Nil(t, ncp) + }) + + t.Run("should work", func(t *testing.T) { + ncp, err := NewNodesConfigProviderAPI(&epochNotifierMock.EpochNotifierStub{}, getEnableEpochCfg()) + require.Nil(t, err) + require.False(t, ncp.IsInterfaceNil()) + }) +} + +func TestNodesConfigProviderAPI_GetCurrentNodesConfig(t *testing.T) { + t.Parallel() + + epochNotifier := forking.NewGenericEpochNotifier() + enableEpochCfg := getEnableEpochCfg() + ncp, _ := NewNodesConfigProviderAPI(epochNotifier, enableEpochCfg) + + maxNodesConfig1 := enableEpochCfg.MaxNodesChangeEnableEpoch[0] + maxNodesConfig2 := enableEpochCfg.MaxNodesChangeEnableEpoch[1] + maxNodesConfigStakingV4Step3 := enableEpochCfg.MaxNodesChangeEnableEpoch[2] + + require.Equal(t, maxNodesConfig1, 
ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step1EnableEpoch}) + require.Equal(t, maxNodesConfig2, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step2EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step3EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step3EnableEpoch + 1}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step2EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step3EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) +} diff --git a/epochStart/notifier/nodesConfigProvider_test.go b/epochStart/notifier/nodesConfigProvider_test.go new file mode 100644 index 00000000000..a813ff4b48d --- /dev/null +++ b/epochStart/notifier/nodesConfigProvider_test.go @@ -0,0 +1,121 @@ +package notifier + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" + "github.com/stretchr/testify/require" +) + +func TestNewNodesConfigProvider(t *testing.T) { + t.Parallel() + + ncp, err := NewNodesConfigProvider(nil, nil) + require.Equal(t, process.ErrNilEpochNotifier, err) + require.True(t, ncp.IsInterfaceNil()) + + epochNotifier := forking.NewGenericEpochNotifier() + ncp, err = NewNodesConfigProvider(epochNotifier, nil) + require.Nil(t, err) + require.False(t, ncp.IsInterfaceNil()) +} + +func TestNodesConfigProvider_GetAllNodesConfigSorted(t *testing.T) { + t.Parallel() + + nodesConfigEpoch0 := config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + } + nodesConfigEpoch1 := config.MaxNodesChangeConfig{ + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + } + nodesConfigEpoch6 := config.MaxNodesChangeConfig{ + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 1, + } + + unsortedNodesConfig := []config.MaxNodesChangeConfig{ + nodesConfigEpoch6, + nodesConfigEpoch0, + nodesConfigEpoch1, + } + sortedNodesConfig := []config.MaxNodesChangeConfig{ + nodesConfigEpoch0, + nodesConfigEpoch1, + nodesConfigEpoch6, + } + + epochNotifier := forking.NewGenericEpochNotifier() + ncp, _ := NewNodesConfigProvider(epochNotifier, unsortedNodesConfig) + require.Equal(t, sortedNodesConfig, ncp.GetAllNodesConfig()) +} + +func TestNodesConfigProvider_EpochConfirmedCorrectMaxNumNodesAfterNodeRestart(t *testing.T) { + t.Parallel() + + nodesConfigEpoch0 := config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + } + nodesConfigEpoch1 := config.MaxNodesChangeConfig{ + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + } + nodesConfigEpoch6 := config.MaxNodesChangeConfig{ + EpochEnable: 6, + MaxNumNodes: 48, + 
NodesToShufflePerShard: 1, + } + + allNodesConfig := []config.MaxNodesChangeConfig{ + nodesConfigEpoch0, + nodesConfigEpoch1, + nodesConfigEpoch6, + } + epochNotifier := forking.NewGenericEpochNotifier() + ncp, _ := NewNodesConfigProvider(epochNotifier, allNodesConfig) + + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + require.Equal(t, nodesConfigEpoch0, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: 1}) + require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig()) + + for epoch := uint32(2); epoch <= 5; epoch++ { + epochNotifier.CheckEpoch(&block.Header{Epoch: epoch}) + require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig()) + } + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: 5}) + require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: 6}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: 6}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) + + for epoch := uint32(7); epoch <= 20; epoch++ { + epochNotifier.CheckEpoch(&block.Header{Epoch: epoch}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) + } + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 1}) + epochNotifier.CheckEpoch(&block.Header{Epoch: 21}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) +} diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go index e3f09fdf2a0..496702b8d81 100644 --- a/epochStart/shardchain/trigger.go +++ b/epochStart/shardchain/trigger.go @@ -46,14 +46,15 @@ type ArgsShardEpochStartTrigger struct { HeaderValidator epochStart.HeaderValidator Uint64Converter typeConverters.Uint64ByteSliceConverter - DataPool dataRetriever.PoolsHolder - Storage dataRetriever.StorageService - RequestHandler epochStart.RequestHandler - EpochStartNotifier epochStart.Notifier - PeerMiniBlocksSyncer process.ValidatorInfoSyncer - RoundHandler process.RoundHandler - AppStatusHandler core.AppStatusHandler - EnableEpochsHandler common.EnableEpochsHandler + DataPool dataRetriever.PoolsHolder + Storage dataRetriever.StorageService + RequestHandler epochStart.RequestHandler + EpochStartNotifier epochStart.Notifier + PeerMiniBlocksSyncer process.ValidatorInfoSyncer + RoundHandler process.RoundHandler + AppStatusHandler core.AppStatusHandler + EnableEpochsHandler common.EnableEpochsHandler + ExtraDelayForRequestBlockInfo time.Duration Epoch uint32 Validity uint64 @@ -112,6 +113,8 @@ type trigger struct { mutMissingMiniBlocks sync.RWMutex mutMissingValidatorsInfo sync.RWMutex cancelFunc func() + + extraDelayForRequestBlockInfo time.Duration } type metaInfo struct { @@ -221,10 +224,14 @@ func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, error) { return nil, err } - trigggerStateKey := common.TriggerRegistryInitialKeyPrefix + fmt.Sprintf("%d", args.Epoch) + if args.ExtraDelayForRequestBlockInfo != common.ExtraDelayForRequestBlockInfo { + log.Warn("different delay for request block info: the epoch change trigger might not behave normally", + "value from config", args.ExtraDelayForRequestBlockInfo.String(), "expected", common.ExtraDelayForRequestBlockInfo.String()) + } + triggerStateKey := common.TriggerRegistryInitialKeyPrefix + fmt.Sprintf("%d", args.Epoch) t := &trigger{ - triggerStateKey: 
[]byte(trigggerStateKey), + triggerStateKey: []byte(triggerStateKey), epoch: args.Epoch, metaEpoch: args.Epoch, currentRoundIndex: 0, @@ -260,6 +267,7 @@ func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, error) { appStatusHandler: args.AppStatusHandler, roundHandler: args.RoundHandler, enableEpochsHandler: args.EnableEpochsHandler, + extraDelayForRequestBlockInfo: args.ExtraDelayForRequestBlockInfo, } t.headersPool.RegisterHandler(t.receivedMetaBlock) @@ -586,7 +594,7 @@ func (t *trigger) receivedMetaBlock(headerHandler data.HeaderHandler, metaBlockH t.newEpochHdrReceived = true t.mapEpochStartHdrs[string(metaBlockHash)] = metaHdr // waiting for late broadcast of mini blocks and transactions to be done and received - wait := common.ExtraDelayForRequestBlockInfo + wait := t.extraDelayForRequestBlockInfo roundDifferences := t.roundHandler.Index() - int64(headerHandler.GetRound()) if roundDifferences > 1 { wait = 0 diff --git a/errors/errors.go b/errors/errors.go index 81f547d8bea..b1edc990afc 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -593,5 +593,11 @@ var ErrEmptyAddress = errors.New("empty Address") // ErrInvalidNodeOperationMode signals that an invalid node operation mode has been provided var ErrInvalidNodeOperationMode = errors.New("invalid node operation mode") -// ErrNilTxExecutionOrderHandler signals that a nil tx execution order handler has been provided -var ErrNilTxExecutionOrderHandler = errors.New("nil tx execution order handler") +// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") + +// ErrNilEpochSystemSCProcessor defines the error for setting a nil EpochSystemSCProcessor +var ErrNilEpochSystemSCProcessor = errors.New("nil epoch system SC processor") + +// ErrNilRelayedTxV3Processor signals that a nil relayed tx v3 processor has been provided +var ErrNilRelayedTxV3Processor = errors.New("nil relayed tx v3 processor") diff --git a/facade/initial/initialNodeFacade.go b/facade/initial/initialNodeFacade.go index 1d45caace60..7411a2078e9 100644 --- a/facade/initial/initialNodeFacade.go +++ b/facade/initial/initialNodeFacade.go @@ -156,6 +156,11 @@ func (inf *initialNodeFacade) ValidatorStatisticsApi() (map[string]*validator.Va return nil, errNodeStarting } +// AuctionListApi returns nil and error +func (inf *initialNodeFacade) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + return nil, errNodeStarting +} + // SendBulkTransactions returns 0 and error func (inf *initialNodeFacade) SendBulkTransactions(_ []*transaction.Transaction) (uint64, error) { return uint64(0), errNodeStarting @@ -426,6 +431,11 @@ func (inf *initialNodeFacade) GetManagedKeys() []string { return nil } +// GetLoadedKeys returns nil +func (inf *initialNodeFacade) GetLoadedKeys() []string { + return nil +} + // GetEligibleManagedKeys returns nil and error func (inf *initialNodeFacade) GetEligibleManagedKeys() ([]string, error) { return nil, errNodeStarting diff --git a/facade/initial/initialNodeFacade_test.go b/facade/initial/initialNodeFacade_test.go index 2633349d69f..294f0accfca 100644 --- a/facade/initial/initialNodeFacade_test.go +++ b/facade/initial/initialNodeFacade_test.go @@ -95,6 +95,10 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, v1) assert.Equal(t, errNodeStarting, err) + v2, err := inf.AuctionListApi() + assert.Nil(t, v2) + assert.Equal(t, errNodeStarting, err) + u1, err := 
inf.SendBulkTransactions(nil) assert.Equal(t, uint64(0), u1) assert.Equal(t, errNodeStarting, err) @@ -329,12 +333,15 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, txPoolGaps) assert.Equal(t, errNodeStarting, err) - cnt := inf.GetManagedKeysCount() - assert.Zero(t, cnt) + count := inf.GetManagedKeysCount() + assert.Zero(t, count) keys := inf.GetManagedKeys() assert.Nil(t, keys) + keys = inf.GetLoadedKeys() + assert.Nil(t, keys) + keys, err = inf.GetEligibleManagedKeys() assert.Nil(t, keys) assert.Equal(t, errNodeStarting, err) diff --git a/facade/interface.go b/facade/interface.go index 4c782e6a574..35f185874ed 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -5,6 +5,7 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + coreData "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" @@ -75,6 +76,10 @@ type NodeHandler interface { // about the account correlated with provided address GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) + // GetAccountWithKeys returns an accountResponse containing information + // about the account correlated with provided address and all keys + GetAccountWithKeys(address string, options api.AccountQueryOptions, ctx context.Context) (api.AccountResponse, api.BlockInfo, error) + // GetCode returns the code for the given code hash GetCode(codeHash []byte, options api.AccountQueryOptions) ([]byte, api.BlockInfo) @@ -86,6 +91,8 @@ type NodeHandler interface { // ValidatorStatisticsApi return the statistics for all the validators ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error IsSelfTrigger() bool @@ -106,7 +113,7 @@ type NodeHandler interface { // TransactionSimulatorProcessor defines the actions which a transaction simulator processor has to implement type TransactionSimulatorProcessor interface { - ProcessTx(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) + ProcessTx(tx *transaction.Transaction, currentHeader coreData.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) IsInterfaceNil() bool } @@ -142,6 +149,7 @@ type ApiResolver interface { GetGasConfigs() map[string]map[string]uint64 GetManagedKeysCount() int GetManagedKeys() []string + GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) diff --git a/facade/mock/apiResolverStub.go b/facade/mock/apiResolverStub.go index e2ab9aa3707..33bae8518aa 100644 --- a/facade/mock/apiResolverStub.go +++ b/facade/mock/apiResolverStub.go @@ -46,6 +46,7 @@ type ApiResolverStub struct { GetGasConfigsCalled func() map[string]map[string]uint64 GetManagedKeysCountCalled func() int GetManagedKeysCalled func() []string + GetLoadedKeysCalled func() []string GetEligibleManagedKeysCalled func() ([]string, error) GetWaitingManagedKeysCalled func() ([]string, error) GetWaitingEpochsLeftForPublicKeyCalled func(publicKey string) (uint32, error) @@ -309,6 +310,14 @@ func (ars *ApiResolverStub) GetManagedKeys() []string { return make([]string, 0) } +// 
GetLoadedKeys - +func (ars *ApiResolverStub) GetLoadedKeys() []string { + if ars.GetLoadedKeysCalled != nil { + return ars.GetLoadedKeysCalled() + } + return make([]string, 0) +} + // GetEligibleManagedKeys - func (ars *ApiResolverStub) GetEligibleManagedKeys() ([]string, error) { if ars.GetEligibleManagedKeysCalled != nil { diff --git a/facade/mock/nodeStub.go b/facade/mock/nodeStub.go index 254f92218ba..1e779e0ebce 100644 --- a/facade/mock/nodeStub.go +++ b/facade/mock/nodeStub.go @@ -26,6 +26,7 @@ type NodeStub struct { ValidateTransactionForSimulationCalled func(tx *transaction.Transaction, bypassSignature bool) error SendBulkTransactionsHandler func(txs []*transaction.Transaction) (uint64, error) GetAccountCalled func(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) + GetAccountWithKeysCalled func(address string, options api.AccountQueryOptions, ctx context.Context) (api.AccountResponse, api.BlockInfo, error) GetCodeCalled func(codeHash []byte, options api.AccountQueryOptions) ([]byte, api.BlockInfo) GetCurrentPublicKeyHandler func() string GenerateAndSendBulkTransactionsHandler func(destination string, value *big.Int, nrTransactions uint64) error @@ -54,6 +55,7 @@ type NodeStub struct { VerifyProofCalled func(rootHash string, address string, proof [][]byte) (bool, error) GetTokenSupplyCalled func(token string) (*api.ESDTSupply, error) IsDataTrieMigratedCalled func(address string, options api.AccountQueryOptions) (bool, error) + AuctionListApiCalled func() ([]*common.AuctionListValidatorAPIResponse, error) } // GetProof - @@ -139,7 +141,11 @@ func (ns *NodeStub) DecodeAddressPubkey(pk string) ([]byte, error) { // GetBalance - func (ns *NodeStub) GetBalance(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) { - return ns.GetBalanceCalled(address, options) + if ns.GetBalanceCalled != nil { + return ns.GetBalanceCalled(address, options) + } + + return nil, api.BlockInfo{}, nil } // CreateTransaction - @@ -150,22 +156,47 @@ func (ns *NodeStub) CreateTransaction(txArgs *external.ArgsCreateTransaction) (* // ValidateTransaction - func (ns *NodeStub) ValidateTransaction(tx *transaction.Transaction) error { - return ns.ValidateTransactionHandler(tx) + if ns.ValidateTransactionHandler != nil { + return ns.ValidateTransactionHandler(tx) + } + + return nil } // ValidateTransactionForSimulation - func (ns *NodeStub) ValidateTransactionForSimulation(tx *transaction.Transaction, bypassSignature bool) error { - return ns.ValidateTransactionForSimulationCalled(tx, bypassSignature) + if ns.ValidateTransactionForSimulationCalled != nil { + return ns.ValidateTransactionForSimulationCalled(tx, bypassSignature) + } + + return nil } // SendBulkTransactions - func (ns *NodeStub) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { - return ns.SendBulkTransactionsHandler(txs) + if ns.SendBulkTransactionsHandler != nil { + return ns.SendBulkTransactionsHandler(txs) + } + + return 0, nil } // GetAccount - func (ns *NodeStub) GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - return ns.GetAccountCalled(address, options) + if ns.GetAccountCalled != nil { + return ns.GetAccountCalled(address, options) + } + + return api.AccountResponse{}, api.BlockInfo{}, nil +} + +// GetAccountWithKeys - +func (ns *NodeStub) GetAccountWithKeys(address string, options api.AccountQueryOptions, ctx context.Context) (api.AccountResponse, api.BlockInfo, error) { + if 
ns.GetAccountWithKeysCalled != nil { + return ns.GetAccountWithKeysCalled(address, options, ctx) + } + + return api.AccountResponse{}, api.BlockInfo{}, nil } // GetCode - @@ -179,22 +210,47 @@ func (ns *NodeStub) GetCode(codeHash []byte, options api.AccountQueryOptions) ([ // GetHeartbeats - func (ns *NodeStub) GetHeartbeats() []data.PubKeyHeartbeat { - return ns.GetHeartbeatsHandler() + if ns.GetHeartbeatsHandler != nil { + return ns.GetHeartbeatsHandler() + } + + return nil } // ValidatorStatisticsApi - func (ns *NodeStub) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { - return ns.ValidatorStatisticsApiCalled() + if ns.ValidatorStatisticsApiCalled != nil { + return ns.ValidatorStatisticsApiCalled() + } + + return nil, nil +} + +// AuctionListApi - +func (ns *NodeStub) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + if ns.AuctionListApiCalled != nil { + return ns.AuctionListApiCalled() + } + + return nil, nil } // DirectTrigger - func (ns *NodeStub) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error { - return ns.DirectTriggerCalled(epoch, withEarlyEndOfEpoch) + if ns.DirectTriggerCalled != nil { + return ns.DirectTriggerCalled(epoch, withEarlyEndOfEpoch) + } + + return nil } // IsSelfTrigger - func (ns *NodeStub) IsSelfTrigger() bool { - return ns.IsSelfTriggerCalled() + if ns.IsSelfTriggerCalled != nil { + return ns.IsSelfTriggerCalled() + } + + return false } // GetQueryHandler - diff --git a/facade/nodeFacade.go b/facade/nodeFacade.go index f91a405a96c..8bc696b6adc 100644 --- a/facade/nodeFacade.go +++ b/facade/nodeFacade.go @@ -163,7 +163,7 @@ func (nf *nodeFacade) RestAPIServerDebugMode() bool { // RestApiInterface returns the interface on which the rest API should start on, based on the config file provided. 
// The API will start on the DefaultRestInterface value unless a correct value is passed or -// the value is explicitly set to off, in which case it will not start at all +// // the value is explicitly set to off, in which case it will not start at all func (nf *nodeFacade) RestApiInterface() string { if nf.config.RestApiInterface == "" { return DefaultRestInterface @@ -284,6 +284,11 @@ func (nf *nodeFacade) ValidatorStatisticsApi() (map[string]*validator.ValidatorS return nf.node.ValidatorStatisticsApi() } +// AuctionListApi will return the data about the validators in the auction list +func (nf *nodeFacade) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + return nf.node.AuctionListApi() +} + // SendBulkTransactions will send a bulk of transactions on the topic channel func (nf *nodeFacade) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { return nf.node.SendBulkTransactions(txs) @@ -331,7 +336,19 @@ func (nf *nodeFacade) ComputeTransactionGasLimit(tx *transaction.Transaction) (* // GetAccount returns a response containing information about the account correlated with provided address func (nf *nodeFacade) GetAccount(address string, options apiData.AccountQueryOptions) (apiData.AccountResponse, apiData.BlockInfo, error) { - accountResponse, blockInfo, err := nf.node.GetAccount(address, options) + var accountResponse apiData.AccountResponse + var blockInfo apiData.BlockInfo + var err error + + if options.WithKeys { + ctx, cancel := nf.getContextForApiTrieRangeOperations() + defer cancel() + + accountResponse, blockInfo, err = nf.node.GetAccountWithKeys(address, options, ctx) + } else { + accountResponse, blockInfo, err = nf.node.GetAccount(address, options) + } + if err != nil { return apiData.AccountResponse{}, apiData.BlockInfo{}, err } @@ -354,13 +371,19 @@ func (nf *nodeFacade) GetAccounts(addresses []string, options apiData.AccountQue response := make(map[string]*apiData.AccountResponse) var blockInfo apiData.BlockInfo - for _, address := range addresses { + for i, address := range addresses { accountResponse, blockInfoForAccount, err := nf.node.GetAccount(address, options) if err != nil { return nil, apiData.BlockInfo{}, err } - - blockInfo = blockInfoForAccount + // Use the first block info as the block info for the whole bulk + if i == 0 { + blockInfo = blockInfoForAccount + blockRootHash, errBlockRootHash := hex.DecodeString(blockInfoForAccount.RootHash) + if errBlockRootHash == nil { + options.BlockRootHash = blockRootHash + } + } codeHash := accountResponse.CodeHash code, _ := nf.node.GetCode(codeHash, options) @@ -590,11 +613,16 @@ func (nf *nodeFacade) GetManagedKeysCount() int { return nf.apiResolver.GetManagedKeysCount() } -// GetManagedKeys returns all keys managed by the current node when running in multikey mode +// GetManagedKeys returns all keys that should act as validator(main or backup that took over) and will be managed by this node func (nf *nodeFacade) GetManagedKeys() []string { return nf.apiResolver.GetManagedKeys() } +// GetLoadedKeys returns all keys that were loaded by this node +func (nf *nodeFacade) GetLoadedKeys() []string { + return nf.apiResolver.GetLoadedKeys() +} + // GetEligibleManagedKeys returns the eligible managed keys when node is running in multikey mode func (nf *nodeFacade) GetEligibleManagedKeys() ([]string, error) { return nf.apiResolver.GetEligibleManagedKeys() diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index 299fa1cfb50..21823b60b6e 100644 --- a/facade/nodeFacade_test.go 
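// A rough sketch of the block-pinning idea used by GetAccounts above (pinBulkToFirstBlock is a
// hypothetical helper, shown only for illustration; it assumes the hex and apiData imports already
// present in nodeFacade.go): the block info returned for the first queried address is reused as
// BlockRootHash for the rest of the bulk, so every account in the response is read from the same
// state snapshot.
func pinBulkToFirstBlock(
	addresses []string,
	options apiData.AccountQueryOptions,
	getAccount func(string, apiData.AccountQueryOptions) (apiData.AccountResponse, apiData.BlockInfo, error),
) (map[string]*apiData.AccountResponse, error) {
	response := make(map[string]*apiData.AccountResponse)
	for i, address := range addresses {
		accountResponse, blockInfoForAccount, err := getAccount(address, options)
		if err != nil {
			return nil, err
		}
		if i == 0 {
			// pin all subsequent lookups to the first account's block root hash, when it decodes cleanly
			blockRootHash, errDecode := hex.DecodeString(blockInfoForAccount.RootHash)
			if errDecode == nil {
				options.BlockRootHash = blockRootHash
			}
		}
		response[address] = &accountResponse
	}
	return response, nil
}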
+++ b/facade/nodeFacade_test.go @@ -1317,6 +1317,22 @@ func TestNodeFacade_GetEligibleManagedKeys(t *testing.T) { assert.Equal(t, expectedResult, result) } +func TestNodeFacade_GetLoadedKeys(t *testing.T) { + t.Parallel() + + providedLoadedKeys := []string{"pk1", "pk2"} + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetLoadedKeysCalled: func() []string { + return providedLoadedKeys + }, + } + nf, _ := NewNodeFacade(arg) + + keys := nf.GetLoadedKeys() + require.Equal(t, providedLoadedKeys, keys) +} + func TestNodeFacade_GetWaitingEpochsLeftForPublicKey(t *testing.T) { t.Parallel() diff --git a/factory/api/apiResolverFactory.go b/factory/api/apiResolverFactory.go index ed3610ca42d..dfefa56ff94 100644 --- a/factory/api/apiResolverFactory.go +++ b/factory/api/apiResolverFactory.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/disabled" + "github.com/multiversx/mx-chain-go/common/operationmodes" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" @@ -71,40 +72,42 @@ type ApiResolverArgs struct { } type scQueryServiceArgs struct { - generalConfig *config.Config - epochConfig *config.EpochConfig - coreComponents factory.CoreComponentsHolder - stateComponents factory.StateComponentsHolder - dataComponents factory.DataComponentsHolder - processComponents factory.ProcessComponentsHolder - statusCoreComponents factory.StatusCoreComponentsHolder - gasScheduleNotifier core.GasScheduleNotifier - messageSigVerifier vm.MessageSignVerifier - systemSCConfig *config.SystemSmartContractsConfig - bootstrapper process.Bootstrapper - guardedAccountHandler process.GuardedAccountHandler - allowVMQueriesChan chan struct{} - workingDir string - processingMode common.NodeProcessingMode + generalConfig *config.Config + epochConfig *config.EpochConfig + coreComponents factory.CoreComponentsHolder + stateComponents factory.StateComponentsHolder + dataComponents factory.DataComponentsHolder + processComponents factory.ProcessComponentsHolder + statusCoreComponents factory.StatusCoreComponentsHolder + gasScheduleNotifier core.GasScheduleNotifier + messageSigVerifier vm.MessageSignVerifier + systemSCConfig *config.SystemSmartContractsConfig + bootstrapper process.Bootstrapper + guardedAccountHandler process.GuardedAccountHandler + allowVMQueriesChan chan struct{} + workingDir string + processingMode common.NodeProcessingMode + isInHistoricalBalancesMode bool } type scQueryElementArgs struct { - generalConfig *config.Config - epochConfig *config.EpochConfig - coreComponents factory.CoreComponentsHolder - stateComponents factory.StateComponentsHolder - dataComponents factory.DataComponentsHolder - processComponents factory.ProcessComponentsHolder - statusCoreComponents factory.StatusCoreComponentsHolder - gasScheduleNotifier core.GasScheduleNotifier - messageSigVerifier vm.MessageSignVerifier - systemSCConfig *config.SystemSmartContractsConfig - bootstrapper process.Bootstrapper - guardedAccountHandler process.GuardedAccountHandler - allowVMQueriesChan chan struct{} - workingDir string - index int - processingMode common.NodeProcessingMode + generalConfig *config.Config + epochConfig *config.EpochConfig + coreComponents factory.CoreComponentsHolder + stateComponents 
factory.StateComponentsHolder + dataComponents factory.DataComponentsHolder + processComponents factory.ProcessComponentsHolder + statusCoreComponents factory.StatusCoreComponentsHolder + gasScheduleNotifier core.GasScheduleNotifier + messageSigVerifier vm.MessageSignVerifier + systemSCConfig *config.SystemSmartContractsConfig + bootstrapper process.Bootstrapper + guardedAccountHandler process.GuardedAccountHandler + allowVMQueriesChan chan struct{} + workingDir string + index int + processingMode common.NodeProcessingMode + isInHistoricalBalancesMode bool } // CreateApiResolver is able to create an ApiResolver instance that will solve the REST API requests through the node facade @@ -112,24 +115,25 @@ type scQueryElementArgs struct { func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { apiWorkingDir := filepath.Join(args.Configs.FlagsConfig.WorkingDir, common.TemporaryPath) argsSCQuery := &scQueryServiceArgs{ - generalConfig: args.Configs.GeneralConfig, - epochConfig: args.Configs.EpochConfig, - coreComponents: args.CoreComponents, - dataComponents: args.DataComponents, - stateComponents: args.StateComponents, - processComponents: args.ProcessComponents, - statusCoreComponents: args.StatusCoreComponents, - gasScheduleNotifier: args.GasScheduleNotifier, - messageSigVerifier: args.CryptoComponents.MessageSignVerifier(), - systemSCConfig: args.Configs.SystemSCConfig, - bootstrapper: args.Bootstrapper, - guardedAccountHandler: args.BootstrapComponents.GuardedAccountHandler(), - allowVMQueriesChan: args.AllowVMQueriesChan, - workingDir: apiWorkingDir, - processingMode: args.ProcessingMode, - } - - scQueryService, err := createScQueryService(argsSCQuery) + generalConfig: args.Configs.GeneralConfig, + epochConfig: args.Configs.EpochConfig, + coreComponents: args.CoreComponents, + dataComponents: args.DataComponents, + stateComponents: args.StateComponents, + processComponents: args.ProcessComponents, + statusCoreComponents: args.StatusCoreComponents, + gasScheduleNotifier: args.GasScheduleNotifier, + messageSigVerifier: args.CryptoComponents.MessageSignVerifier(), + systemSCConfig: args.Configs.SystemSCConfig, + bootstrapper: args.Bootstrapper, + guardedAccountHandler: args.BootstrapComponents.GuardedAccountHandler(), + allowVMQueriesChan: args.AllowVMQueriesChan, + workingDir: apiWorkingDir, + processingMode: args.ProcessingMode, + isInHistoricalBalancesMode: operationmodes.IsInHistoricalBalancesMode(args.Configs), + } + + scQueryService, storageManagers, err := createScQueryService(argsSCQuery) if err != nil { return nil, err } @@ -271,7 +275,9 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { AccountsParser: args.ProcessComponents.AccountsParser(), GasScheduleNotifier: args.GasScheduleNotifier, ManagedPeersMonitor: args.StatusComponents.ManagedPeersMonitor(), + PublicKey: args.CryptoComponents.PublicKeyString(), NodesCoordinator: args.ProcessComponents.NodesCoordinator(), + StorageManagers: storageManagers, } return external.NewNodeApiResolver(argsApiResolver) @@ -279,75 +285,91 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { func createScQueryService( args *scQueryServiceArgs, -) (process.SCQueryService, error) { +) (process.SCQueryService, []common.StorageManager, error) { numConcurrentVms := args.generalConfig.VirtualMachine.Querying.NumConcurrentVMs if numConcurrentVms < 1 { - return nil, fmt.Errorf("VirtualMachine.Querying.NumConcurrentVms should be a positive number more than 1") + return nil, nil, 
fmt.Errorf("VirtualMachine.Querying.NumConcurrentVms should be a positive number more than 1") } argsQueryElem := &scQueryElementArgs{ - generalConfig: args.generalConfig, - epochConfig: args.epochConfig, - coreComponents: args.coreComponents, - stateComponents: args.stateComponents, - dataComponents: args.dataComponents, - processComponents: args.processComponents, - statusCoreComponents: args.statusCoreComponents, - gasScheduleNotifier: args.gasScheduleNotifier, - messageSigVerifier: args.messageSigVerifier, - systemSCConfig: args.systemSCConfig, - bootstrapper: args.bootstrapper, - guardedAccountHandler: args.guardedAccountHandler, - allowVMQueriesChan: args.allowVMQueriesChan, - workingDir: args.workingDir, - index: 0, - processingMode: args.processingMode, + generalConfig: args.generalConfig, + epochConfig: args.epochConfig, + coreComponents: args.coreComponents, + stateComponents: args.stateComponents, + dataComponents: args.dataComponents, + processComponents: args.processComponents, + statusCoreComponents: args.statusCoreComponents, + gasScheduleNotifier: args.gasScheduleNotifier, + messageSigVerifier: args.messageSigVerifier, + systemSCConfig: args.systemSCConfig, + bootstrapper: args.bootstrapper, + guardedAccountHandler: args.guardedAccountHandler, + allowVMQueriesChan: args.allowVMQueriesChan, + workingDir: args.workingDir, + index: 0, + processingMode: args.processingMode, + isInHistoricalBalancesMode: args.isInHistoricalBalancesMode, } var err error var scQueryService process.SCQueryService + var storageManager common.StorageManager + storageManagers := make([]common.StorageManager, 0, numConcurrentVms) list := make([]process.SCQueryService, 0, numConcurrentVms) for i := 0; i < numConcurrentVms; i++ { argsQueryElem.index = i - scQueryService, err = createScQueryElement(argsQueryElem) + scQueryService, storageManager, err = createScQueryElement(*argsQueryElem) if err != nil { - return nil, err + return nil, nil, err } list = append(list, scQueryService) + storageManagers = append(storageManagers, storageManager) } sqQueryDispatcher, err := smartContract.NewScQueryServiceDispatcher(list) if err != nil { - return nil, err + return nil, nil, err } - return sqQueryDispatcher, nil + return sqQueryDispatcher, storageManagers, nil } func createScQueryElement( - args *scQueryElementArgs, -) (process.SCQueryService, error) { + args scQueryElementArgs, +) (process.SCQueryService, common.StorageManager, error) { var err error + selfShardID := args.processComponents.ShardCoordinator().SelfId() + pkConverter := args.coreComponents.AddressPubKeyConverter() automaticCrawlerAddressesStrings := args.generalConfig.BuiltInFunctions.AutomaticCrawlerAddresses convertedAddresses, errDecode := factory.DecodeAddresses(pkConverter, automaticCrawlerAddressesStrings) if errDecode != nil { - return nil, errDecode + return nil, nil, errDecode } dnsV2AddressesStrings := args.generalConfig.BuiltInFunctions.DNSV2Addresses convertedDNSV2Addresses, errDecode := factory.DecodeAddresses(pkConverter, dnsV2AddressesStrings) if errDecode != nil { - return nil, errDecode + return nil, nil, errDecode + } + + apiBlockchain, err := createBlockchainForScQuery(selfShardID) + if err != nil { + return nil, nil, err + } + + accountsAdapterApi, storageManager, err := createNewAccountsAdapterApi(args, apiBlockchain) + if err != nil { + return nil, nil, err } builtInFuncFactory, err := createBuiltinFuncs( args.gasScheduleNotifier, args.coreComponents.InternalMarshalizer(), - args.stateComponents.AccountsAdapterAPI(), + 
accountsAdapterApi, args.processComponents.ShardCoordinator(), args.coreComponents.EpochNotifier(), args.coreComponents.EnableEpochsHandler(), @@ -357,13 +379,13 @@ func createScQueryElement( convertedDNSV2Addresses, ) if err != nil { - return nil, err + return nil, nil, err } cacherCfg := storageFactory.GetCacherFromConfig(args.generalConfig.SmartContractDataPool) smartContractsCache, err := storageunit.NewCache(cacherCfg) if err != nil { - return nil, err + return nil, nil, err } scStorage := args.generalConfig.SmartContractsStorageForSCQuery @@ -387,76 +409,76 @@ func createScQueryElement( GasSchedule: args.gasScheduleNotifier, Counter: counters.NewDisabledCounter(), MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), + Accounts: accountsAdapterApi, + BlockChain: apiBlockchain, } - var apiBlockchain data.ChainHandler var vmFactory process.VirtualMachinesContainerFactory maxGasForVmQueries := args.generalConfig.VirtualMachine.GasConfig.ShardMaxGasPerVmQuery - if args.processComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if selfShardID == core.MetachainShardId { maxGasForVmQueries = args.generalConfig.VirtualMachine.GasConfig.MetaMaxGasPerVmQuery - apiBlockchain, vmFactory, err = createMetaVmContainerFactory(args, argsHook) + vmFactory, err = createMetaVmContainerFactory(args, argsHook) } else { - apiBlockchain, vmFactory, err = createShardVmContainerFactory(args, argsHook) + vmFactory, err = createShardVmContainerFactory(args, argsHook) } if err != nil { - return nil, err + return nil, nil, err } log.Debug("maximum gas per VM Query", "value", maxGasForVmQueries) vmContainer, err := vmFactory.Create() if err != nil { - return nil, err + return nil, nil, err } err = vmFactory.BlockChainHookImpl().SetVMContainer(vmContainer) if err != nil { - return nil, err + return nil, nil, err } err = builtInFuncFactory.SetPayableHandler(vmFactory.BlockChainHookImpl()) if err != nil { - return nil, err + return nil, nil, err } argsNewSCQueryService := smartContract.ArgsNewSCQueryService{ - VmContainer: vmContainer, - EconomicsFee: args.coreComponents.EconomicsData(), - BlockChainHook: vmFactory.BlockChainHookImpl(), - MainBlockChain: args.dataComponents.Blockchain(), - APIBlockChain: apiBlockchain, - WasmVMChangeLocker: args.coreComponents.WasmVMChangeLocker(), - Bootstrapper: args.bootstrapper, - AllowExternalQueriesChan: args.allowVMQueriesChan, - MaxGasLimitPerQuery: maxGasForVmQueries, - HistoryRepository: args.processComponents.HistoryRepository(), - ShardCoordinator: args.processComponents.ShardCoordinator(), - StorageService: args.dataComponents.StorageService(), - Marshaller: args.coreComponents.InternalMarshalizer(), - Hasher: args.coreComponents.Hasher(), - Uint64ByteSliceConverter: args.coreComponents.Uint64ByteSliceConverter(), - } - - return smartContract.NewSCQueryService(argsNewSCQueryService) + VmContainer: vmContainer, + EconomicsFee: args.coreComponents.EconomicsData(), + BlockChainHook: vmFactory.BlockChainHookImpl(), + MainBlockChain: args.dataComponents.Blockchain(), + APIBlockChain: apiBlockchain, + WasmVMChangeLocker: args.coreComponents.WasmVMChangeLocker(), + Bootstrapper: args.bootstrapper, + AllowExternalQueriesChan: args.allowVMQueriesChan, + MaxGasLimitPerQuery: maxGasForVmQueries, + HistoryRepository: args.processComponents.HistoryRepository(), + ShardCoordinator: args.processComponents.ShardCoordinator(), + StorageService: args.dataComponents.StorageService(), + Marshaller: args.coreComponents.InternalMarshalizer(), + Hasher: 
args.coreComponents.Hasher(), + Uint64ByteSliceConverter: args.coreComponents.Uint64ByteSliceConverter(), + IsInHistoricalBalancesMode: args.isInHistoricalBalancesMode, + } + + scQueryService, err := smartContract.NewSCQueryService(argsNewSCQueryService) + + return scQueryService, storageManager, err } -func createMetaVmContainerFactory(args *scQueryElementArgs, argsHook hooks.ArgBlockChainHook) (data.ChainHandler, process.VirtualMachinesContainerFactory, error) { - apiBlockchain, err := blockchain.NewMetaChain(disabled.NewAppStatusHandler()) - if err != nil { - return nil, nil, err +func createBlockchainForScQuery(selfShardID uint32) (data.ChainHandler, error) { + isMetachain := selfShardID == core.MetachainShardId + if isMetachain { + return blockchain.NewMetaChain(disabled.NewAppStatusHandler()) } - accountsAdapterApi, err := createNewAccountsAdapterApi(args, apiBlockchain) - if err != nil { - return nil, nil, err - } - - argsHook.BlockChain = apiBlockchain - argsHook.Accounts = accountsAdapterApi + return blockchain.NewBlockChain(disabled.NewAppStatusHandler()) +} +func createMetaVmContainerFactory(args scQueryElementArgs, argsHook hooks.ArgBlockChainHook) (process.VirtualMachinesContainerFactory, error) { blockChainHookImpl, errBlockChainHook := hooks.NewBlockChainHookImpl(argsHook) if errBlockChainHook != nil { - return nil, nil, errBlockChainHook + return nil, errBlockChainHook } argsNewVmFactory := metachain.ArgsNewVMContainerFactory{ @@ -474,38 +496,26 @@ func createMetaVmContainerFactory(args *scQueryElementArgs, argsHook hooks.ArgBl ChanceComputer: args.coreComponents.Rater(), ShardCoordinator: args.processComponents.ShardCoordinator(), EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), + NodesCoordinator: args.processComponents.NodesCoordinator(), } vmFactory, err := metachain.NewVMContainerFactory(argsNewVmFactory) if err != nil { - return nil, nil, err + return nil, err } - return apiBlockchain, vmFactory, nil + return vmFactory, nil } -func createShardVmContainerFactory(args *scQueryElementArgs, argsHook hooks.ArgBlockChainHook) (data.ChainHandler, process.VirtualMachinesContainerFactory, error) { - apiBlockchain, err := blockchain.NewBlockChain(disabled.NewAppStatusHandler()) - if err != nil { - return nil, nil, err - } - - accountsAdapterApi, err := createNewAccountsAdapterApi(args, apiBlockchain) - if err != nil { - return nil, nil, err - } - - argsHook.BlockChain = apiBlockchain - argsHook.Accounts = accountsAdapterApi - +func createShardVmContainerFactory(args scQueryElementArgs, argsHook hooks.ArgBlockChainHook) (process.VirtualMachinesContainerFactory, error) { queryVirtualMachineConfig := args.generalConfig.VirtualMachine.Querying.VirtualMachineConfig esdtTransferParser, errParser := parsers.NewESDTTransferParser(args.coreComponents.InternalMarshalizer()) if errParser != nil { - return nil, nil, errParser + return nil, errParser } blockChainHookImpl, errBlockChainHook := hooks.NewBlockChainHookImpl(argsHook) if errBlockChainHook != nil { - return nil, nil, errBlockChainHook + return nil, errBlockChainHook } argsNewVMFactory := shard.ArgVMContainerFactory{ @@ -519,6 +529,7 @@ func createShardVmContainerFactory(args *scQueryElementArgs, argsHook hooks.ArgB WasmVMChangeLocker: args.coreComponents.WasmVMChangeLocker(), ESDTTransferParser: esdtTransferParser, Hasher: args.coreComponents.Hasher(), + PubKeyConverter: args.coreComponents.AddressPubKeyConverter(), } log.Debug("apiResolver: enable epoch for sc deploy", "epoch", 
args.epochConfig.EnableEpochs.SCDeployEnableEpoch) @@ -527,13 +538,13 @@ func createShardVmContainerFactory(args *scQueryElementArgs, argsHook hooks.ArgB vmFactory, err := shard.NewVMContainerFactory(argsNewVMFactory) if err != nil { - return nil, nil, err + return nil, err } - return apiBlockchain, vmFactory, nil + return vmFactory, nil } -func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.ChainHandler) (state.AccountsAdapterAPI, error) { +func createNewAccountsAdapterApi(args scQueryElementArgs, chainHandler data.ChainHandler) (state.AccountsAdapterAPI, common.StorageManager, error) { argsAccCreator := factoryState.ArgsAccountCreator{ Hasher: args.coreComponents.Hasher(), Marshaller: args.coreComponents.InternalMarshalizer(), @@ -541,17 +552,17 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha } accountFactory, err := factoryState.NewAccountCreator(argsAccCreator) if err != nil { - return nil, err + return nil, nil, err } storagePruning, err := newStoragePruningManager(args) if err != nil { - return nil, err + return nil, nil, err } storageService := args.dataComponents.StorageService() trieStorer, err := storageService.GetStorer(dataRetriever.UserAccountsUnit) if err != nil { - return nil, err + return nil, nil, err } trieFactoryArgs := trieFactory.TrieFactoryArgs{ @@ -562,7 +573,7 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha } trFactory, err := trieFactory.NewTrieFactory(trieFactoryArgs) if err != nil { - return nil, err + return nil, nil, err } trieCreatorArgs := trieFactory.TrieCreateArgs{ @@ -575,9 +586,9 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), StatsCollector: args.statusCoreComponents.StateStatsHandler(), } - _, merkleTrie, err := trFactory.Create(trieCreatorArgs) + trieStorageManager, merkleTrie, err := trFactory.Create(trieCreatorArgs) if err != nil { - return nil, err + return nil, nil, err } argsAPIAccountsDB := state.ArgsAccountsDB{ @@ -592,18 +603,20 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha provider, err := blockInfoProviders.NewCurrentBlockInfo(chainHandler) if err != nil { - return nil, err + return nil, nil, err } accounts, err := state.NewAccountsDB(argsAPIAccountsDB) if err != nil { - return nil, err + return nil, nil, err } - return state.NewAccountsDBApi(accounts, provider) + accountsDB, err := state.NewAccountsDBApi(accounts, provider) + + return accountsDB, trieStorageManager, err } -func newStoragePruningManager(args *scQueryElementArgs) (state.StoragePruningManager, error) { +func newStoragePruningManager(args scQueryElementArgs) (state.StoragePruningManager, error) { argsMemEviction := evictionWaitingList.MemoryEvictionWaitingListArgs{ RootHashesSize: args.generalConfig.EvictionWaitingList.RootHashesSize, HashesSize: args.generalConfig.EvictionWaitingList.HashesSize, diff --git a/factory/api/apiResolverFactory_test.go b/factory/api/apiResolverFactory_test.go index 591ea31b79f..6f0d1026304 100644 --- a/factory/api/apiResolverFactory_test.go +++ b/factory/api/apiResolverFactory_test.go @@ -1,6 +1,7 @@ package api_test import ( + "fmt" "strings" "sync" "testing" @@ -26,6 +27,7 @@ import ( epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" 
"github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" @@ -70,7 +72,7 @@ func createMockArgs(t *testing.T) *api.ApiResolverArgs { cryptoComponents := componentsMock.GetCryptoComponents(coreComponents) networkComponents := componentsMock.GetNetworkComponents(cryptoComponents) dataComponents := componentsMock.GetDataComponents(coreComponents, shardCoordinator) - stateComponents := componentsMock.GetStateComponents(coreComponents) + stateComponents := componentsMock.GetStateComponents(coreComponents, componentsMock.GetStatusCoreComponents()) processComponents := componentsMock.GetProcessComponents(shardCoordinator, coreComponents, networkComponents, dataComponents, cryptoComponents, stateComponents) argsB := componentsMock.GetBootStrapFactoryArgs() @@ -184,7 +186,7 @@ func TestCreateApiResolver(t *testing.T) { failingStepsInstance.addressPublicKeyConverterFailingStep = 3 apiResolver, err := api.CreateApiResolver(failingArgs) require.NotNil(t, err) - require.True(t, strings.Contains(strings.ToLower(err.Error()), "public key converter")) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "key converter")) require.True(t, check.IfNil(apiResolver)) }) t.Run("createBuiltinFuncs fails should error", func(t *testing.T) { @@ -273,7 +275,7 @@ func TestCreateApiResolver(t *testing.T) { failingStepsInstance.addressPublicKeyConverterFailingStep = 10 apiResolver, err := api.CreateApiResolver(failingArgs) require.NotNil(t, err) - require.True(t, strings.Contains(strings.ToLower(err.Error()), "public key converter")) + require.True(t, strings.Contains(strings.ToLower(err.Error()), "key converter")) require.True(t, check.IfNil(apiResolver)) }) t.Run("should work", func(t *testing.T) { @@ -311,6 +313,7 @@ func createMockSCQueryElementArgs() api.SCQueryElementArgs { WasmVMVersions: []config.WasmVMVersionByEpoch{ {StartEpoch: 0, Version: "*"}, }, + TransferAndExecuteByUserAddresses: []string{"erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe3"}, }, }, }, @@ -327,7 +330,7 @@ func createMockSCQueryElementArgs() api.SCQueryElementArgs { EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, Hash: &testscommon.HasherStub{}, RatingHandler: &testscommon.RaterMock{}, WasmVMChangeLockerInternal: &sync.RWMutex{}, @@ -346,6 +349,7 @@ func createMockSCQueryElementArgs() api.SCQueryElementArgs { AppStatusHandlerCalled: func() core.AppStatusHandler { return &statusHandler.AppStatusHandlerStub{} }, + StateStatsHandlerField: &testscommon.StateStatisticsHandlerStub{}, }, DataComponents: &mock.DataComponentsMock{ Storage: genericMocks.NewChainStorerMock(0), @@ -379,9 +383,10 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args := createMockSCQueryElementArgs() args.GuardedAccountHandler = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.Equal(t, process.ErrNilGuardedAccountHandler, err) require.Nil(t, scQueryService) + 
require.Nil(t, storageManager) }) t.Run("DecodeAddresses fails", func(t *testing.T) { t.Parallel() @@ -390,10 +395,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args.CoreComponents = &mock.CoreComponentsMock{ AddrPubKeyConv: nil, } - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "public key converter")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("createBuiltinFuncs fails", func(t *testing.T) { t.Parallel() @@ -401,10 +407,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args := createMockSCQueryElementArgs() coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) coreCompMock.IntMarsh = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "marshalizer")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("NewCache fails", func(t *testing.T) { t.Parallel() @@ -414,10 +421,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { Type: "LRU", SizeInBytes: 1, } - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "lru")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("metachain - NewVMContainerFactory fails", func(t *testing.T) { t.Parallel() @@ -432,10 +440,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { } coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) coreCompMock.Hash = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "hasher")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("shard - NewVMContainerFactory fails", func(t *testing.T) { t.Parallel() @@ -443,10 +452,30 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args := createMockSCQueryElementArgs() coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) coreCompMock.Hash = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "hasher")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) + }) +} + +func TestCreateApiResolver_createBlockchainForScQuery(t *testing.T) { + t.Parallel() + + t.Run("for metachain", func(t *testing.T) { + t.Parallel() + + apiBlockchain, err := api.CreateBlockchainForScQuery(core.MetachainShardId) + require.NoError(t, err) + require.Equal(t, "*blockchain.metaChain", fmt.Sprintf("%T", apiBlockchain)) }) + t.Run("for shard", func(t *testing.T) { + t.Parallel() + + apiBlockchain, err := api.CreateBlockchainForScQuery(0) + require.NoError(t, err) + require.Equal(t, "*blockchain.blockChain", fmt.Sprintf("%T", apiBlockchain)) + }) } diff --git a/factory/api/export_test.go b/factory/api/export_test.go index 0164c0c2b10..a17ddfad30c 100644 --- a/factory/api/export_test.go +++ b/factory/api/export_test.go @@ -2,6 +2,8 @@ package api import ( 
"github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" @@ -28,8 +30,8 @@ type SCQueryElementArgs struct { } // CreateScQueryElement - -func CreateScQueryElement(args SCQueryElementArgs) (process.SCQueryService, error) { - return createScQueryElement(&scQueryElementArgs{ +func CreateScQueryElement(args SCQueryElementArgs) (process.SCQueryService, common.StorageManager, error) { + return createScQueryElement(scQueryElementArgs{ generalConfig: args.GeneralConfig, epochConfig: args.EpochConfig, coreComponents: args.CoreComponents, @@ -47,3 +49,8 @@ func CreateScQueryElement(args SCQueryElementArgs) (process.SCQueryService, erro guardedAccountHandler: args.GuardedAccountHandler, }) } + +// CreateBlockchainForScQuery - +func CreateBlockchainForScQuery(selfShardID uint32) (data.ChainHandler, error) { + return createBlockchainForScQuery(selfShardID) +} diff --git a/factory/bootstrap/bootstrapComponents.go b/factory/bootstrap/bootstrapComponents.go index 988b72764e0..a9ef7851ccb 100644 --- a/factory/bootstrap/bootstrapComponents.go +++ b/factory/bootstrap/bootstrapComponents.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/process/headerCheck" "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/directoryhandler" storageFactory "github.com/multiversx/mx-chain-go/storage/factory" @@ -55,14 +56,15 @@ type bootstrapComponentsFactory struct { } type bootstrapComponents struct { - epochStartBootstrapper factory.EpochStartBootstrapper - bootstrapParamsHolder factory.BootstrapParamsHolder - nodeType core.NodeType - shardCoordinator sharding.Coordinator - headerVersionHandler nodeFactory.HeaderVersionHandler - versionedHeaderFactory nodeFactory.VersionedHeaderFactory - headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler - guardedAccountHandler process.GuardedAccountHandler + epochStartBootstrapper factory.EpochStartBootstrapper + bootstrapParamsHolder factory.BootstrapParamsHolder + nodeType core.NodeType + shardCoordinator sharding.Coordinator + headerVersionHandler nodeFactory.HeaderVersionHandler + versionedHeaderFactory nodeFactory.VersionedHeaderFactory + headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + guardedAccountHandler process.GuardedAccountHandler + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } // NewBootstrapComponentsFactory creates an instance of bootstrapComponentsFactory @@ -70,6 +72,9 @@ func NewBootstrapComponentsFactory(args BootstrapComponentsFactoryArgs) (*bootst if check.IfNil(args.CoreComponents) { return nil, errors.ErrNilCoreComponentsHolder } + if check.IfNil(args.CoreComponents.EnableEpochsHandler()) { + return nil, errors.ErrNilEnableEpochsHandler + } if check.IfNil(args.CryptoComponents) { return nil, errors.ErrNilCryptoComponentsHolder } @@ -185,31 +190,40 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { return nil, err } + 
nodesCoordinatorRegistryFactory, err := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + bcf.coreComponents.InternalMarshalizer(), + bcf.coreComponents.EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step2Flag), + ) + if err != nil { + return nil, err + } + epochStartBootstrapArgs := bootstrap.ArgsEpochStartBootstrap{ - CoreComponentsHolder: bcf.coreComponents, - CryptoComponentsHolder: bcf.cryptoComponents, - MainMessenger: bcf.networkComponents.NetworkMessenger(), - FullArchiveMessenger: bcf.networkComponents.FullArchiveNetworkMessenger(), - GeneralConfig: bcf.config, - PrefsConfig: bcf.prefConfig.Preferences, - FlagsConfig: bcf.flagsConfig, - EconomicsData: bcf.coreComponents.EconomicsData(), - GenesisNodesConfig: bcf.coreComponents.GenesisNodesSetup(), - GenesisShardCoordinator: genesisShardCoordinator, - StorageUnitOpener: unitOpener, - Rater: bcf.coreComponents.Rater(), - DestinationShardAsObserver: destShardIdAsObserver, - NodeShuffler: bcf.coreComponents.NodesShuffler(), - RoundHandler: bcf.coreComponents.RoundHandler(), - LatestStorageDataProvider: latestStorageDataProvider, - ArgumentsParser: smartContract.NewArgumentParser(), - StatusHandler: bcf.statusCoreComponents.AppStatusHandler(), - HeaderIntegrityVerifier: headerIntegrityVerifier, - DataSyncerCreator: dataSyncerFactory, - ScheduledSCRsStorer: nil, // will be updated after sync from network - TrieSyncStatisticsProvider: tss, - NodeProcessingMode: common.GetNodeProcessingMode(&bcf.importDbConfig), - StateStatsHandler: bcf.statusCoreComponents.StateStatsHandler(), + CoreComponentsHolder: bcf.coreComponents, + CryptoComponentsHolder: bcf.cryptoComponents, + MainMessenger: bcf.networkComponents.NetworkMessenger(), + FullArchiveMessenger: bcf.networkComponents.FullArchiveNetworkMessenger(), + GeneralConfig: bcf.config, + PrefsConfig: bcf.prefConfig.Preferences, + FlagsConfig: bcf.flagsConfig, + EconomicsData: bcf.coreComponents.EconomicsData(), + GenesisNodesConfig: bcf.coreComponents.GenesisNodesSetup(), + GenesisShardCoordinator: genesisShardCoordinator, + StorageUnitOpener: unitOpener, + Rater: bcf.coreComponents.Rater(), + DestinationShardAsObserver: destShardIdAsObserver, + NodeShuffler: bcf.coreComponents.NodesShuffler(), + RoundHandler: bcf.coreComponents.RoundHandler(), + LatestStorageDataProvider: latestStorageDataProvider, + ArgumentsParser: smartContract.NewArgumentParser(), + StatusHandler: bcf.statusCoreComponents.AppStatusHandler(), + HeaderIntegrityVerifier: headerIntegrityVerifier, + DataSyncerCreator: dataSyncerFactory, + ScheduledSCRsStorer: nil, // will be updated after sync from network + TrieSyncStatisticsProvider: tss, + NodeProcessingMode: common.GetNodeProcessingMode(&bcf.importDbConfig), + StateStatsHandler: bcf.statusCoreComponents.StateStatsHandler(), + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } var epochStartBootstrapper factory.EpochStartBootstrapper @@ -260,12 +274,13 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { bootstrapParamsHolder: &bootstrapParams{ bootstrapParams: bootstrapParameters, }, - nodeType: nodeType, - shardCoordinator: shardCoordinator, - headerVersionHandler: headerVersionHandler, - headerIntegrityVerifier: headerIntegrityVerifier, - versionedHeaderFactory: versionedHeaderFactory, - guardedAccountHandler: guardedAccountHandler, + nodeType: nodeType, + shardCoordinator: shardCoordinator, + headerVersionHandler: headerVersionHandler, + headerIntegrityVerifier: headerIntegrityVerifier, + 
versionedHeaderFactory: versionedHeaderFactory, + guardedAccountHandler: guardedAccountHandler, + nodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, }, nil } diff --git a/factory/bootstrap/bootstrapComponentsHandler.go b/factory/bootstrap/bootstrapComponentsHandler.go index bda412e2759..7401f4834f4 100644 --- a/factory/bootstrap/bootstrapComponentsHandler.go +++ b/factory/bootstrap/bootstrapComponentsHandler.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) var _ factory.ComponentHandler = (*managedBootstrapComponents)(nil) @@ -118,6 +119,18 @@ func (mbf *managedBootstrapComponents) EpochBootstrapParams() factory.BootstrapP return mbf.bootstrapComponents.bootstrapParamsHolder } +// NodesCoordinatorRegistryFactory returns the NodesCoordinatorRegistryFactory +func (mbf *managedBootstrapComponents) NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory { + mbf.mutBootstrapComponents.RLock() + defer mbf.mutBootstrapComponents.RUnlock() + + if mbf.bootstrapComponents == nil { + return nil + } + + return mbf.bootstrapComponents.nodesCoordinatorRegistryFactory +} + // IsInterfaceNil returns true if the underlying object is nil func (mbf *managedBootstrapComponents) IsInterfaceNil() bool { return mbf == nil diff --git a/factory/bootstrap/bootstrapComponents_test.go b/factory/bootstrap/bootstrapComponents_test.go index 85c22017b28..180315b1f36 100644 --- a/factory/bootstrap/bootstrapComponents_test.go +++ b/factory/bootstrap/bootstrapComponents_test.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory/bootstrap" @@ -38,6 +39,19 @@ func TestNewBootstrapComponentsFactory(t *testing.T) { require.Nil(t, bcf) require.Equal(t, errorsMx.ErrNilCoreComponentsHolder, err) }) + t.Run("nil enable epochs handler should error", func(t *testing.T) { + t.Parallel() + + argsCopy := args + argsCopy.CoreComponents = &factory.CoreComponentsHolderStub{ + EnableEpochsHandlerCalled: func() common.EnableEpochsHandler { + return nil + }, + } + bcf, err := bootstrap.NewBootstrapComponentsFactory(argsCopy) + require.Nil(t, bcf) + require.Equal(t, errorsMx.ErrNilEnableEpochsHandler, err) + }) t.Run("nil crypto components should error", func(t *testing.T) { t.Parallel() @@ -218,7 +232,8 @@ func TestBootstrapComponentsFactory_Create(t *testing.T) { coreComponents := componentsMock.GetDefaultCoreComponents() args.CoreComponents = coreComponents coreComponents.RatingHandler = nil - bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + bcf, err := bootstrap.NewBootstrapComponentsFactory(args) + require.Nil(t, err) require.NotNil(t, bcf) bc, err := bcf.Create() diff --git a/factory/bootstrap/bootstrapParameters.go b/factory/bootstrap/bootstrapParameters.go index 5002f597e55..0002beb1f62 100644 --- a/factory/bootstrap/bootstrapParameters.go +++ b/factory/bootstrap/bootstrapParameters.go @@ -25,7 +25,7 @@ func (bph *bootstrapParams) NumOfShards() uint32 { } // NodesConfig returns the nodes 
coordinator config after bootstrap -func (bph *bootstrapParams) NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry { +func (bph *bootstrapParams) NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler { return bph.bootstrapParams.NodesConfig } diff --git a/factory/bootstrap/shardingFactory.go b/factory/bootstrap/shardingFactory.go index 40472acae1e..6662129299b 100644 --- a/factory/bootstrap/shardingFactory.go +++ b/factory/bootstrap/shardingFactory.go @@ -113,6 +113,7 @@ func CreateNodesCoordinator( nodeTypeProvider core.NodeTypeProviderHandler, enableEpochsHandler common.EnableEpochsHandler, validatorInfoCacher epochStart.ValidatorInfoCacher, + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory, ) (nodesCoordinator.NodesCoordinator, error) { if check.IfNil(nodeShufflerOut) { return nil, errErd.ErrNilShuffleOutCloser @@ -165,15 +166,15 @@ func CreateNodesCoordinator( if bootstrapParameters.NodesConfig() != nil { nodeRegistry := bootstrapParameters.NodesConfig() currentEpoch = bootstrapParameters.Epoch() - epochsConfig, ok := nodeRegistry.EpochsConfig[fmt.Sprintf("%d", currentEpoch)] + epochsConfig, ok := nodeRegistry.GetEpochsConfig()[fmt.Sprintf("%d", currentEpoch)] if ok { - eligibles := epochsConfig.EligibleValidators + eligibles := epochsConfig.GetEligibleValidators() eligibleValidators, err = nodesCoordinator.SerializableValidatorsToValidators(eligibles) if err != nil { return nil, err } - waitings := epochsConfig.WaitingValidators + waitings := epochsConfig.GetWaitingValidators() waitingValidators, err = nodesCoordinator.SerializableValidatorsToValidators(waitings) if err != nil { return nil, err @@ -197,28 +198,29 @@ func CreateNodesCoordinator( } argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: marshalizer, - Hasher: hasher, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartNotifier, - BootStorer: bootStorer, - ShardIDAsObserver: shardIDAsObserver, - NbShards: nbShards, - EligibleNodes: eligibleValidators, - WaitingNodes: waitingValidators, - SelfPublicKey: pubKeyBytes, - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: shuffledOutHandler, - Epoch: currentEpoch, - StartEpoch: startEpoch, - ChanStopNode: chanNodeStop, - NodeTypeProvider: nodeTypeProvider, - IsFullArchive: prefsConfig.FullArchive, - EnableEpochsHandler: enableEpochsHandler, - ValidatorInfoCacher: validatorInfoCacher, - GenesisNodesSetupHandler: nodesConfig, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: marshalizer, + Hasher: hasher, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartNotifier, + BootStorer: bootStorer, + ShardIDAsObserver: shardIDAsObserver, + NbShards: nbShards, + EligibleNodes: eligibleValidators, + WaitingNodes: waitingValidators, + SelfPublicKey: pubKeyBytes, + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: shuffledOutHandler, + Epoch: currentEpoch, + StartEpoch: startEpoch, + ChanStopNode: chanNodeStop, + NodeTypeProvider: nodeTypeProvider, + IsFullArchive: prefsConfig.FullArchive, + EnableEpochsHandler: enableEpochsHandler, + ValidatorInfoCacher: validatorInfoCacher, + GenesisNodesSetupHandler: nodesConfig, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } baseNodesCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git 
a/factory/bootstrap/shardingFactory_test.go b/factory/bootstrap/shardingFactory_test.go index 0df777933b0..c7a54e077f4 100644 --- a/factory/bootstrap/shardingFactory_test.go +++ b/factory/bootstrap/shardingFactory_test.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -41,7 +42,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Run("nil pub key should error", func(t *testing.T) { t.Parallel() - shardC, nodeType, err := CreateShardCoordinator(&testscommon.NodesSetupStub{}, nil, config.PreferencesConfig{}, nil) + shardC, nodeType, err := CreateShardCoordinator(&genesisMocks.NodesSetupStub{}, nil, config.PreferencesConfig{}, nil) require.Equal(t, errErd.ErrNilPublicKey, err) require.Empty(t, nodeType) require.True(t, check.IfNil(shardC)) @@ -49,7 +50,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Run("nil logger should error", func(t *testing.T) { t.Parallel() - shardC, nodeType, err := CreateShardCoordinator(&testscommon.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{}, config.PreferencesConfig{}, nil) + shardC, nodeType, err := CreateShardCoordinator(&genesisMocks.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{}, config.PreferencesConfig{}, nil) require.Equal(t, errErd.ErrNilLogger, err) require.Empty(t, nodeType) require.True(t, check.IfNil(shardC)) @@ -58,7 +59,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{ ToByteArrayStub: func() ([]byte, error) { return nil, expectedErr @@ -75,7 +76,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{ ToByteArrayStub: func() ([]byte, error) { return nil, sharding.ErrPublicKeyNotFoundInGenesis // force this error here @@ -95,7 +96,7 @@ func TestCreateShardCoordinator(t *testing.T) { counter := 0 shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return 0, sharding.ErrPublicKeyNotFoundInGenesis // force this error }, @@ -123,7 +124,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return 0, sharding.ErrPublicKeyNotFoundInGenesis // force this error }, @@ -149,7 +150,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return core.MetachainShardId, nil }, @@ -169,7 +170,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - 
&testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return core.MetachainShardId, nil }, @@ -192,7 +193,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( nil, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, &cryptoMocks.PublicKeyStub{}, @@ -208,6 +209,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilShuffleOutCloser, err) require.True(t, check.IfNil(nodesC)) @@ -233,6 +235,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilGenesisNodesSetupHandler, err) require.True(t, check.IfNil(nodesC)) @@ -242,7 +245,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, nil, &cryptoMocks.PublicKeyStub{}, @@ -258,6 +261,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilEpochStartNotifier, err) require.True(t, check.IfNil(nodesC)) @@ -267,7 +271,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, nil, @@ -283,6 +287,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilPublicKey, err) require.True(t, check.IfNil(nodesC)) @@ -292,7 +297,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, &cryptoMocks.PublicKeyStub{}, @@ -308,6 +313,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilBootstrapParamsHandler, err) require.True(t, check.IfNil(nodesC)) @@ -317,7 +323,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, &cryptoMocks.PublicKeyStub{}, @@ -333,6 +339,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, 
&enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, nodesCoordinator.ErrNilNodeStopChannel, err) require.True(t, check.IfNil(nodesC)) @@ -342,7 +349,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "", }, @@ -360,6 +367,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -369,7 +377,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "disabled", }, @@ -391,6 +399,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.True(t, errors.Is(err, expectedErr)) require.True(t, check.IfNil(nodesC)) @@ -400,7 +409,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -422,6 +431,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.True(t, errors.Is(err, expectedErr)) require.True(t, check.IfNil(nodesC)) @@ -431,7 +441,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -453,6 +463,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -462,7 +473,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -484,6 +495,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -493,7 +505,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + 
&genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -510,7 +522,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &shardingMocks.NodeShufflerMock{}, 0, &bootstrapMocks.BootstrapParamsHandlerMock{ - NodesConfigCalled: func() *nodesCoordinator.NodesCoordinatorRegistry { + NodesConfigCalled: func() nodesCoordinator.NodesCoordinatorRegistryHandler { return &nodesCoordinator.NodesCoordinatorRegistry{ EpochsConfig: map[string]*nodesCoordinator.EpochValidators{ "0": { @@ -536,6 +548,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -545,7 +558,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "disabled", }, @@ -562,7 +575,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &shardingMocks.NodeShufflerMock{}, 0, &bootstrapMocks.BootstrapParamsHandlerMock{ - NodesConfigCalled: func() *nodesCoordinator.NodesCoordinatorRegistry { + NodesConfigCalled: func() nodesCoordinator.NodesCoordinatorRegistryHandler { return &nodesCoordinator.NodesCoordinatorRegistry{ EpochsConfig: map[string]*nodesCoordinator.EpochValidators{ "0": { @@ -588,6 +601,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Nil(t, err) require.False(t, check.IfNil(nodesC)) @@ -608,7 +622,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.EpochStartConfig{ MaxShuffledOutRestartThreshold: 5.0, }, @@ -621,7 +635,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.EpochStartConfig{ MinShuffledOutRestartThreshold: 5.0, }, @@ -634,7 +648,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.EpochStartConfig{}, nil, // force NewShuffleOutCloser to fail ) @@ -645,7 +659,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return 4000 }, diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index a2dc7a3e1bf..decdb7c85fa 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -261,11 +261,6 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { return nil, err } - sentSignaturesHandler, err := spos.NewSentSignaturesTracker(ccf.cryptoComponents.KeysHandler()) - if err != nil { - return nil, err - } - fct, err := sposFactory.GetSubroundsFactory( consensusDataContainer, consensusState, @@ -273,7 +268,7 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, 
error) { ccf.config.Consensus.Type, ccf.statusCoreComponents.AppStatusHandler(), ccf.statusComponents.OutportHandler(), - sentSignaturesHandler, + ccf.processComponents.SentSignaturesTracker(), []byte(ccf.coreComponents.ChainID()), ccf.networkComponents.NetworkMessenger().ID(), ) diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index 67f551acf1d..a7b00e6a347 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -29,6 +29,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" outportMocks "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" @@ -57,7 +58,7 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent AlarmSch: &testscommon.AlarmSchedulerStub{}, NtpSyncTimer: &testscommon.SyncTimerStub{}, GenesisBlockTime: time.Time{}, - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return 2 }, @@ -139,6 +140,7 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent HeaderSigVerif: &testsMocks.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, FallbackHdrValidator: &testscommon.FallBackHeaderValidatorStub{}, + SentSignaturesTrackerInternal: &testscommon.SentSignatureTrackerStub{}, }, StateComponents: &factoryMocks.StateComponentsMock{ StorageManagers: map[string]common.StorageManager{ diff --git a/factory/core/coreComponents.go b/factory/core/coreComponents.go index f04afe47d61..247ee7e05f8 100644 --- a/factory/core/coreComponents.go +++ b/factory/core/coreComponents.go @@ -33,7 +33,6 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/process/rating" - "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/statusHandler" @@ -244,35 +243,15 @@ func (ccf *coreComponentsFactory) Create() (*coreComponents, error) { } wasmVMChangeLocker := &sync.RWMutex{} - gasScheduleConfigurationFolderName := ccf.configPathsHolder.GasScheduleDirectoryName - argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ - GasScheduleConfig: ccf.epochConfig.GasSchedule, - ConfigDir: gasScheduleConfigurationFolderName, - EpochNotifier: epochNotifier, - WasmVMChangeLocker: wasmVMChangeLocker, - } - gasScheduleNotifier, err := forking.NewGasScheduleNotifier(argsGasScheduleNotifier) - if err != nil { - return nil, err - } - - builtInCostHandler, err := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - ArgsParser: smartContract.NewArgumentParser(), - GasSchedule: gasScheduleNotifier, - }) - if err != nil { - return nil, err - } txVersionChecker := 
versioning.NewTxVersionChecker(ccf.config.GeneralSettings.MinTransactionVersion) log.Trace("creating economics data components") argsNewEconomicsData := economics.ArgsNewEconomicsData{ - Economics: &ccf.economicsConfig, - EpochNotifier: epochNotifier, - EnableEpochsHandler: enableEpochsHandler, - BuiltInFunctionsCostHandler: builtInCostHandler, - TxVersionChecker: txVersionChecker, + Economics: &ccf.economicsConfig, + EpochNotifier: epochNotifier, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: txVersionChecker, } economicsData, err := economics.NewEconomicsData(argsNewEconomicsData) if err != nil { @@ -311,6 +290,7 @@ func (ccf *coreComponentsFactory) Create() (*coreComponents, error) { ShuffleBetweenShards: true, MaxNodesEnableConfig: ccf.epochConfig.EnableEpochs.MaxNodesChangeEnableEpoch, EnableEpochsHandler: enableEpochsHandler, + EnableEpochs: ccf.epochConfig.EnableEpochs, } nodesShuffler, err := nodesCoordinator.NewHashValidatorsShuffler(argsNodesShuffler) diff --git a/factory/core/coreComponents_test.go b/factory/core/coreComponents_test.go index 79aba4a2532..d88a8a2284e 100644 --- a/factory/core/coreComponents_test.go +++ b/factory/core/coreComponents_test.go @@ -248,18 +248,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidRoundConfigShouldErr(t require.NotNil(t, err) } -func TestCoreComponentsFactory_CreateCoreComponentsInvalidEpochConfigShouldErr(t *testing.T) { - t.Parallel() - - args := componentsMock.GetCoreArgs() - args.EpochConfig = config.EpochConfig{} - ccf, _ := coreComp.NewCoreComponentsFactory(args) - - cc, err := ccf.Create() - require.Nil(t, cc) - require.NotNil(t, err) -} - func TestCoreComponentsFactory_CreateCoreComponentsInvalidGenesisMaxNumberOfShardsShouldErr(t *testing.T) { t.Parallel() diff --git a/factory/disabled/auctionListDisplayer.go b/factory/disabled/auctionListDisplayer.go new file mode 100644 index 00000000000..ec2d2f0774b --- /dev/null +++ b/factory/disabled/auctionListDisplayer.go @@ -0,0 +1,35 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/state" +) + +type auctionListDisplayer struct { +} + +// NewDisabledAuctionListDisplayer creates a disabled auction list displayer +func NewDisabledAuctionListDisplayer() *auctionListDisplayer { + return &auctionListDisplayer{} +} + +// DisplayOwnersData does nothing +func (ald *auctionListDisplayer) DisplayOwnersData(_ map[string]*metachain.OwnerAuctionData) { +} + +// DisplayOwnersSelectedNodes does nothing +func (ald *auctionListDisplayer) DisplayOwnersSelectedNodes(_ map[string]*metachain.OwnerAuctionData) { +} + +// DisplayAuctionList does nothing +func (ald *auctionListDisplayer) DisplayAuctionList( + _ []state.ValidatorInfoHandler, + _ map[string]*metachain.OwnerAuctionData, + _ uint32, +) { +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ald *auctionListDisplayer) IsInterfaceNil() bool { + return ald == nil +} diff --git a/factory/disabled/auctionListSelector.go b/factory/disabled/auctionListSelector.go new file mode 100644 index 00000000000..281102a4a7f --- /dev/null +++ b/factory/disabled/auctionListSelector.go @@ -0,0 +1,21 @@ +package disabled + +import "github.com/multiversx/mx-chain-go/state" + +type auctionListSelector struct { +} + +// NewDisabledAuctionListSelector returns a new instance of a disabled auction list selector +func NewDisabledAuctionListSelector() *auctionListSelector { + return 
&auctionListSelector{} +} + +// SelectNodesFromAuctionList returns nil +func (als *auctionListSelector) SelectNodesFromAuctionList(state.ShardValidatorsInfoMapHandler, []byte) error { + return nil +} + +// IsInterfaceNil returns true if the underlying pointer is nil +func (als *auctionListSelector) IsInterfaceNil() bool { + return als == nil +} diff --git a/factory/disabled/epochStartSystemSCProcessor.go b/factory/disabled/epochStartSystemSCProcessor.go new file mode 100644 index 00000000000..7d9e8720a79 --- /dev/null +++ b/factory/disabled/epochStartSystemSCProcessor.go @@ -0,0 +1,42 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" +) + +type epochStartSystemSCProcessor struct { +} + +// NewDisabledEpochStartSystemSC creates a new disabled EpochStartSystemSCProcessor instance +func NewDisabledEpochStartSystemSC() *epochStartSystemSCProcessor { + return &epochStartSystemSCProcessor{} +} + +// ToggleUnStakeUnBond returns nil +func (e *epochStartSystemSCProcessor) ToggleUnStakeUnBond(_ bool) error { + return nil +} + +// ProcessSystemSmartContract returns nil +func (e *epochStartSystemSCProcessor) ProcessSystemSmartContract( + _ state.ShardValidatorsInfoMapHandler, + _ data.HeaderHandler, +) error { + return nil +} + +// ProcessDelegationRewards returns nil +func (e *epochStartSystemSCProcessor) ProcessDelegationRewards( + _ block.MiniBlockSlice, + _ epochStart.TransactionCacher, +) error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (e *epochStartSystemSCProcessor) IsInterfaceNil() bool { + return e == nil +} diff --git a/factory/disabled/globalSettingsHandler.go b/factory/disabled/globalSettingsHandler.go index 6a950fd17b2..4990c1032fb 100644 --- a/factory/disabled/globalSettingsHandler.go +++ b/factory/disabled/globalSettingsHandler.go @@ -7,6 +7,26 @@ func NewDisabledGlobalSettingHandler() *disabledGlobalSettingsHandler { return &disabledGlobalSettingsHandler{} } +// IsBurnForAll returns false as this is a disabled component +func (d *disabledGlobalSettingsHandler) IsBurnForAll(_ []byte) bool { + return false +} + +// IsSenderOrDestinationWithTransferRole returns false as this is a disabled component +func (d *disabledGlobalSettingsHandler) IsSenderOrDestinationWithTransferRole(_, _, _ []byte) bool { + return false +} + +// GetTokenType returns 0 as this is a disabled component +func (d *disabledGlobalSettingsHandler) GetTokenType(_ []byte) (uint32, error) { + return 0, nil +} + +// SetTokenType does nothing as this is a disabled component +func (d *disabledGlobalSettingsHandler) SetTokenType(_ []byte, _ uint32) error { + return nil +} + // IsPaused returns false as this is a disabled component func (d *disabledGlobalSettingsHandler) IsPaused(_ []byte) bool { return false diff --git a/factory/disabled/stakingDataProvider.go b/factory/disabled/stakingDataProvider.go new file mode 100644 index 00000000000..f24b7b735b2 --- /dev/null +++ b/factory/disabled/stakingDataProvider.go @@ -0,0 +1,38 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" +) + +type stakingDataProvider struct { +} + +// NewDisabledStakingDataProvider returns a new instance of stakingDataProvider +func 
NewDisabledStakingDataProvider() *stakingDataProvider { + return &stakingDataProvider{} +} + +// FillValidatorInfo returns a nil error +func (s *stakingDataProvider) FillValidatorInfo(state.ValidatorInfoHandler) error { + return nil +} + +// ComputeUnQualifiedNodes returns nil values +func (s *stakingDataProvider) ComputeUnQualifiedNodes(_ state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { + return nil, nil, nil +} + +// GetOwnersData returns nil +func (s *stakingDataProvider) GetOwnersData() map[string]*epochStart.OwnerData { + return nil +} + +// Clean does nothing +func (s *stakingDataProvider) Clean() { +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *stakingDataProvider) IsInterfaceNil() bool { + return s == nil +} diff --git a/factory/disabled/txCoordinator.go b/factory/disabled/txCoordinator.go index e4ada3dc6ab..9d8002fb034 100644 --- a/factory/disabled/txCoordinator.go +++ b/factory/disabled/txCoordinator.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/processedMb" ) @@ -111,7 +112,7 @@ func (txCoordinator *TxCoordinator) VerifyCreatedMiniBlocks(_ data.HeaderHandler } // AddIntermediateTransactions does nothing as it is disabled -func (txCoordinator *TxCoordinator) AddIntermediateTransactions(_ map[block.Type][]data.TransactionHandler) error { +func (txCoordinator *TxCoordinator) AddIntermediateTransactions(_ map[block.Type][]data.TransactionHandler, _ []byte) error { return nil } diff --git a/factory/heartbeat/heartbeatV2Components.go b/factory/heartbeat/heartbeatV2Components.go index a551f22e869..97164a7240e 100644 --- a/factory/heartbeat/heartbeatV2Components.go +++ b/factory/heartbeat/heartbeatV2Components.go @@ -272,32 +272,6 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error return nil, err } - argsMainCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: hcf.processComponents.ShardCoordinator(), - PeerShardMapper: hcf.processComponents.PeerShardMapper(), - } - mainCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsMainCrossShardPeerTopicNotifier) - if err != nil { - return nil, err - } - err = hcf.networkComponents.NetworkMessenger().AddPeerTopicNotifier(mainCrossShardPeerTopicNotifier) - if err != nil { - return nil, err - } - - argsFullArchiveCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: hcf.processComponents.ShardCoordinator(), - PeerShardMapper: hcf.processComponents.FullArchivePeerShardMapper(), - } - fullArchiveCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsFullArchiveCrossShardPeerTopicNotifier) - if err != nil { - return nil, err - } - err = hcf.networkComponents.FullArchiveNetworkMessenger().AddPeerTopicNotifier(fullArchiveCrossShardPeerTopicNotifier) - if err != nil { - return nil, err - } - return &heartbeatV2Components{ sender: heartbeatV2Sender, peerAuthRequestsProcessor: paRequestsProcessor, diff --git a/factory/heartbeat/heartbeatV2Components_test.go b/factory/heartbeat/heartbeatV2Components_test.go index f013294a7d1..9a0eb3b14e3 100644 --- a/factory/heartbeat/heartbeatV2Components_test.go +++ b/factory/heartbeat/heartbeatV2Components_test.go @@ -11,8 +11,6 @@ import ( errorsMx 
"github.com/multiversx/mx-chain-go/errors" heartbeatComp "github.com/multiversx/mx-chain-go/factory/heartbeat" testsMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" - "github.com/multiversx/mx-chain-go/p2p" - "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" @@ -474,56 +472,6 @@ func TestHeartbeatV2Components_Create(t *testing.T) { assert.Nil(t, hc) assert.Error(t, err) }) - t.Run("NewCrossShardPeerTopicNotifier fails should error", func(t *testing.T) { - t.Parallel() - - args := createMockHeartbeatV2ComponentsFactoryArgs() - processComp := args.ProcessComponents - cnt := 0 - args.ProcessComponents = &testsMocks.ProcessComponentsStub{ - NodesCoord: processComp.NodesCoordinator(), - EpochTrigger: processComp.EpochStartTrigger(), - EpochNotifier: processComp.EpochStartNotifier(), - NodeRedundancyHandlerInternal: processComp.NodeRedundancyHandler(), - HardforkTriggerField: processComp.HardforkTrigger(), - MainPeerMapper: processComp.PeerShardMapper(), - FullArchivePeerMapper: processComp.FullArchivePeerShardMapper(), - ShardCoordinatorCalled: func() sharding.Coordinator { - cnt++ - if cnt > 3 { - return nil - } - return processComp.ShardCoordinator() - }, - } - hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) - assert.NotNil(t, hcf) - assert.NoError(t, err) - - hc, err := hcf.Create() - assert.Nil(t, hc) - assert.Error(t, err) - }) - t.Run("AddPeerTopicNotifier fails should error", func(t *testing.T) { - t.Parallel() - - args := createMockHeartbeatV2ComponentsFactoryArgs() - args.NetworkComponents = &testsMocks.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{ - AddPeerTopicNotifierCalled: func(notifier p2p.PeerTopicNotifier) error { - return expectedErr - }, - }, - FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, - } - hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) - assert.NotNil(t, hcf) - assert.NoError(t, err) - - hc, err := hcf.Create() - assert.Nil(t, hc) - assert.Equal(t, expectedErr, err) - }) t.Run("should work", func(t *testing.T) { t.Parallel() diff --git a/factory/interface.go b/factory/interface.go index 2498cc916c4..ec92f478992 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -309,6 +309,9 @@ type ProcessComponentsHolder interface { ESDTDataStorageHandlerForAPI() vmcommon.ESDTNFTStorageHandler AccountsParser() genesis.AccountsParser ReceiptsRepository() ReceiptsRepository + SentSignaturesTracker() process.SentSignaturesTracker + EpochSystemSCProcessor() process.EpochStartSystemSCProcessor + RelayedTxV3Processor() process.RelayedTxV3Processor IsInterfaceNil() bool } @@ -435,7 +438,7 @@ type BootstrapParamsHolder interface { Epoch() uint32 SelfShardID() uint32 NumOfShards() uint32 - NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry + NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler IsInterfaceNil() bool } @@ -456,6 +459,7 @@ type BootstrapComponentsHolder interface { HeaderVersionHandler() factory.HeaderVersionHandler HeaderIntegrityVerifier() factory.HeaderIntegrityVerifierHandler GuardedAccountHandler() process.GuardedAccountHandler + NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory IsInterfaceNil() bool } diff --git a/factory/mock/nodesSetupStub.go 
b/factory/mock/nodesSetupStub.go deleted file mode 100644 index 835ad9fc0d8..00000000000 --- a/factory/mock/nodesSetupStub.go +++ /dev/null @@ -1,142 +0,0 @@ -package mock - -import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetChainIdCalled func() string - GetMinTransactionVersionCalled func() uint32 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - MinNumberOfMetaNodesCalled func() uint32 - GetHysteresisCalled func() float32 - GetAdaptivityCalled func() bool -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - - return 1 -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - - return 1 -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - - return 0 -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - - return false -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 2 -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 0 -} - -// GetChainId - -func (n *NodesSetupStub) GetChainId() string { - if n.GetChainIdCalled != nil { - return n.GetChainIdCalled() - } - return "chainID" -} - -// GetMinTransactionVersion - -func (n *NodesSetupStub) GetMinTransactionVersion() uint32 { - if n.GetMinTransactionVersionCalled != nil { - return n.GetMinTransactionVersionCalled() - } - return 1 -} - -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 0 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 0 -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 0 -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - return 
nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - return nil, nil -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/factory/mock/processComponentsStub.go b/factory/mock/processComponentsStub.go index 51265a22997..4a52413b7cd 100644 --- a/factory/mock/processComponentsStub.go +++ b/factory/mock/processComponentsStub.go @@ -56,6 +56,9 @@ type ProcessComponentsMock struct { ESDTDataStorageHandlerForAPIInternal vmcommon.ESDTNFTStorageHandler AccountsParserInternal genesis.AccountsParser ReceiptsRepositoryInternal factory.ReceiptsRepository + SentSignaturesTrackerInternal process.SentSignaturesTracker + EpochSystemSCProcessorInternal process.EpochStartSystemSCProcessor + RelayedTxV3ProcessorField process.RelayedTxV3Processor } // Create - @@ -278,6 +281,21 @@ func (pcm *ProcessComponentsMock) ReceiptsRepository() factory.ReceiptsRepositor return pcm.ReceiptsRepositoryInternal } +// SentSignaturesTracker - +func (pcm *ProcessComponentsMock) SentSignaturesTracker() process.SentSignaturesTracker { + return pcm.SentSignaturesTrackerInternal +} + +// EpochSystemSCProcessor - +func (pcm *ProcessComponentsMock) EpochSystemSCProcessor() process.EpochStartSystemSCProcessor { + return pcm.EpochSystemSCProcessorInternal +} + +// RelayedTxV3Processor - +func (pcm *ProcessComponentsMock) RelayedTxV3Processor() process.RelayedTxV3Processor { + return pcm.RelayedTxV3ProcessorField +} + // IsInterfaceNil - func (pcm *ProcessComponentsMock) IsInterfaceNil() bool { return pcm == nil diff --git a/factory/mock/validatorStatisticsProcessorStub.go b/factory/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 1cb51e79f41..00000000000 --- a/factory/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,130 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if 
vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - return nil -} - -// GetPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false -} diff --git a/factory/mock/validatorsProviderStub.go b/factory/mock/validatorsProviderStub.go deleted file mode 100644 index 98ea652340b..00000000000 --- a/factory/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/validator" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics -} - -// GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - 
return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 7bccd5d8af0..93f3e1e95a3 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -12,7 +12,9 @@ import ( debugFactory "github.com/multiversx/mx-chain-go/debug/factory" "github.com/multiversx/mx-chain-go/epochStart" metachainEpochStart "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/epochStart/notifier" mainFactory "github.com/multiversx/mx-chain-go/factory" + factoryDisabled "github.com/multiversx/mx-chain-go/factory/disabled" "github.com/multiversx/mx-chain-go/genesis" "github.com/multiversx/mx-chain-go/outport" processOutport "github.com/multiversx/mx-chain-go/outport/process" @@ -36,6 +38,7 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract/scrCommon" "github.com/multiversx/mx-chain-go/process/throttle" "github.com/multiversx/mx-chain-go/process/transaction" + "github.com/multiversx/mx-chain-go/process/transactionLog" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/storage/txcache" @@ -48,6 +51,7 @@ import ( type blockProcessorAndVmFactories struct { blockProcessor process.BlockProcessor vmFactoryForProcessing process.VirtualMachinesContainerFactory + epochSystemSCProcessor process.EpochStartSystemSCProcessor } func (pcf *processComponentsFactory) newBlockProcessor( @@ -65,6 +69,8 @@ func (pcf *processComponentsFactory) newBlockProcessor( receiptsRepository mainFactory.ReceiptsRepository, blockCutoffProcessingHandler cutoff.BlockProcessingCutoffHandler, missingTrieNodesNotifier common.MissingTrieNodesNotifier, + sentSignaturesTracker process.SentSignaturesTracker, + relayedTxV3Processor process.RelayedTxV3Processor, ) (*blockProcessorAndVmFactories, error) { shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { @@ -82,6 +88,8 @@ func (pcf *processComponentsFactory) newBlockProcessor( receiptsRepository, blockCutoffProcessingHandler, missingTrieNodesNotifier, + sentSignaturesTracker, + relayedTxV3Processor, ) } if shardCoordinator.SelfId() == core.MetachainShardId { @@ -99,6 +107,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( processedMiniBlocksTracker, receiptsRepository, blockCutoffProcessingHandler, + sentSignaturesTracker, ) } @@ -121,6 +130,8 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( receiptsRepository mainFactory.ReceiptsRepository, blockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler, missingTrieNodesNotifier common.MissingTrieNodesNotifier, + sentSignaturesTracker process.SentSignaturesTracker, + relayedTxV3Processor process.RelayedTxV3Processor, ) (*blockProcessorAndVmFactories, error) { argsParser := smartContract.NewArgumentParser() @@ -217,6 +228,11 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } + err = 
pcf.coreData.EconomicsData().SetTxTypeHandler(txTypeHandler) + if err != nil { + return nil, err + } + gasHandler, err := preprocess.NewGasComputation( pcf.coreData.EconomicsData(), txTypeHandler, @@ -226,34 +242,32 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } - txFeeHandler, err := postprocess.NewFeeAccumulator() - if err != nil { - return nil, err - } - + failedTxLogsAccumulator := transactionLog.NewFailedTxLogsAccumulator() + txFeeHandler := postprocess.NewFeeAccumulator() argsNewScProcessor := scrCommon.ArgsNewSmartContractProcessor{ - VmContainer: vmContainer, - ArgsParser: argsParser, - Hasher: pcf.coreData.Hasher(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - AccountsDB: pcf.state.AccountsAdapter(), - BlockChainHook: vmFactory.BlockChainHookImpl(), - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - PubkeyConv: pcf.coreData.AddressPubKeyConverter(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ScrForwarder: scForwarder, - TxFeeHandler: txFeeHandler, - EconomicsFee: pcf.coreData.EconomicsData(), - GasHandler: gasHandler, - GasSchedule: pcf.gasSchedule, - TxLogsProcessor: pcf.txLogsProcessor, - TxTypeHandler: txTypeHandler, - IsGenesisProcessing: false, - BadTxForwarder: badTxInterim, - EnableRoundsHandler: pcf.coreData.EnableRoundsHandler(), - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), - VMOutputCacher: txcache.NewDisabledCache(), - WasmVMChangeLocker: wasmVMChangeLocker, + VmContainer: vmContainer, + ArgsParser: argsParser, + Hasher: pcf.coreData.Hasher(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + AccountsDB: pcf.state.AccountsAdapter(), + BlockChainHook: vmFactory.BlockChainHookImpl(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScrForwarder: scForwarder, + TxFeeHandler: txFeeHandler, + EconomicsFee: pcf.coreData.EconomicsData(), + GasHandler: gasHandler, + GasSchedule: pcf.gasSchedule, + TxLogsProcessor: pcf.txLogsProcessor, + TxTypeHandler: txTypeHandler, + IsGenesisProcessing: false, + BadTxForwarder: badTxInterim, + EnableRoundsHandler: pcf.coreData.EnableRoundsHandler(), + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + VMOutputCacher: txcache.NewDisabledCache(), + WasmVMChangeLocker: wasmVMChangeLocker, + FailedTxLogsAccumulator: failedTxLogsAccumulator, } scProcessorProxy, err := processProxy.NewSmartContractProcessorProxy(argsNewScProcessor, pcf.epochNotifier) @@ -271,25 +285,27 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( } argsNewTxProcessor := transaction.ArgsNewTxProcessor{ - Accounts: pcf.state.AccountsAdapter(), - Hasher: pcf.coreData.Hasher(), - PubkeyConv: pcf.coreData.AddressPubKeyConverter(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - SignMarshalizer: pcf.coreData.TxMarshalizer(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ScProcessor: scProcessorProxy, - TxFeeHandler: txFeeHandler, - TxTypeHandler: txTypeHandler, - EconomicsFee: pcf.coreData.EconomicsData(), - ReceiptForwarder: receiptTxInterim, - BadTxForwarder: badTxInterim, - ArgsParser: argsParser, - ScrForwarder: scForwarder, - EnableRoundsHandler: pcf.coreData.EnableRoundsHandler(), - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), - GuardianChecker: pcf.bootstrapComponents.GuardedAccountHandler(), - TxVersionChecker: pcf.coreData.TxVersionChecker(), - TxLogsProcessor: pcf.txLogsProcessor, 
+ Accounts: pcf.state.AccountsAdapter(), + Hasher: pcf.coreData.Hasher(), + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + SignMarshalizer: pcf.coreData.TxMarshalizer(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScProcessor: scProcessorProxy, + TxFeeHandler: txFeeHandler, + TxTypeHandler: txTypeHandler, + EconomicsFee: pcf.coreData.EconomicsData(), + ReceiptForwarder: receiptTxInterim, + BadTxForwarder: badTxInterim, + ArgsParser: argsParser, + ScrForwarder: scForwarder, + EnableRoundsHandler: pcf.coreData.EnableRoundsHandler(), + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + GuardianChecker: pcf.bootstrapComponents.GuardedAccountHandler(), + TxVersionChecker: pcf.coreData.TxVersionChecker(), + TxLogsProcessor: pcf.txLogsProcessor, + RelayedTxV3Processor: relayedTxV3Processor, + FailedTxLogsAccumulator: failedTxLogsAccumulator, } transactionProcessor, err := transaction.NewTxProcessor(argsNewTxProcessor) if err != nil { @@ -432,6 +448,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( OutportDataProvider: outportDataProvider, BlockProcessingCutoffHandler: blockProcessingCutoffHandler, ManagedPeersHolder: pcf.crypto.ManagedPeersHolder(), + SentSignaturesTracker: sentSignaturesTracker, } arguments := block.ArgShardProcessor{ ArgBaseProcessor: argumentsBaseProcessor, @@ -447,10 +464,16 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } - return &blockProcessorAndVmFactories{ + blockProcessorComponents := &blockProcessorAndVmFactories{ blockProcessor: blockProcessor, vmFactoryForProcessing: vmFactory, - }, nil + epochSystemSCProcessor: factoryDisabled.NewDisabledEpochStartSystemSC(), + } + + pcf.stakingDataProviderAPI = factoryDisabled.NewDisabledStakingDataProvider() + pcf.auctionListSelectorAPI = factoryDisabled.NewDisabledAuctionListSelector() + + return blockProcessorComponents, nil } func (pcf *processComponentsFactory) newMetaBlockProcessor( @@ -467,6 +490,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository mainFactory.ReceiptsRepository, blockProcessingCutoffhandler cutoff.BlockProcessingCutoffHandler, + sentSignaturesTracker process.SentSignaturesTracker, ) (*blockProcessorAndVmFactories, error) { builtInFuncFactory, err := pcf.createBuiltInFunctionContainer(pcf.state.AccountsAdapter(), make(map[string]struct{})) if err != nil { @@ -541,6 +565,11 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } + err = pcf.coreData.EconomicsData().SetTxTypeHandler(txTypeHandler) + if err != nil { + return nil, err + } + gasHandler, err := preprocess.NewGasComputation( pcf.coreData.EconomicsData(), txTypeHandler, @@ -550,35 +579,33 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } - txFeeHandler, err := postprocess.NewFeeAccumulator() - if err != nil { - return nil, err - } - + failedTxLogsAccumulator := transactionLog.NewFailedTxLogsAccumulator() + txFeeHandler := postprocess.NewFeeAccumulator() enableEpochs := pcf.epochConfig.EnableEpochs argsNewScProcessor := scrCommon.ArgsNewSmartContractProcessor{ - VmContainer: vmContainer, - ArgsParser: argsParser, - Hasher: pcf.coreData.Hasher(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - AccountsDB: pcf.state.AccountsAdapter(), - BlockChainHook: vmFactory.BlockChainHookImpl(), - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - PubkeyConv: 
pcf.coreData.AddressPubKeyConverter(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ScrForwarder: scForwarder, - TxFeeHandler: txFeeHandler, - EconomicsFee: pcf.coreData.EconomicsData(), - TxTypeHandler: txTypeHandler, - GasHandler: gasHandler, - GasSchedule: pcf.gasSchedule, - TxLogsProcessor: pcf.txLogsProcessor, - IsGenesisProcessing: false, - BadTxForwarder: badTxForwarder, - EnableRoundsHandler: pcf.coreData.EnableRoundsHandler(), - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), - VMOutputCacher: txcache.NewDisabledCache(), - WasmVMChangeLocker: wasmVMChangeLocker, + VmContainer: vmContainer, + ArgsParser: argsParser, + Hasher: pcf.coreData.Hasher(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + AccountsDB: pcf.state.AccountsAdapter(), + BlockChainHook: vmFactory.BlockChainHookImpl(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScrForwarder: scForwarder, + TxFeeHandler: txFeeHandler, + EconomicsFee: pcf.coreData.EconomicsData(), + TxTypeHandler: txTypeHandler, + GasHandler: gasHandler, + GasSchedule: pcf.gasSchedule, + TxLogsProcessor: pcf.txLogsProcessor, + IsGenesisProcessing: false, + BadTxForwarder: badTxForwarder, + EnableRoundsHandler: pcf.coreData.EnableRoundsHandler(), + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + VMOutputCacher: txcache.NewDisabledCache(), + WasmVMChangeLocker: wasmVMChangeLocker, + FailedTxLogsAccumulator: failedTxLogsAccumulator, } scProcessorProxy, err := processProxy.NewSmartContractProcessorProxy(argsNewScProcessor, pcf.epochNotifier) @@ -754,8 +781,14 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } + argsStakingDataProvider := metachainEpochStart.StakingDataProviderArgs{ + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + SystemVM: systemVM, + MinNodePrice: pcf.systemSCConfig.StakingSystemSCConfig.GenesisNodePrice, + } + // TODO: in case of changing the minimum node price, make sure to update the staking data provider - stakingDataProvider, err := metachainEpochStart.NewStakingDataProvider(systemVM, pcf.systemSCConfig.StakingSystemSCConfig.GenesisNodePrice) + stakingDataProvider, err := metachainEpochStart.NewStakingDataProvider(argsStakingDataProvider) if err != nil { return nil, err } @@ -770,6 +803,13 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } + stakingDataProviderAPI, err := metachainEpochStart.NewStakingDataProvider(argsStakingDataProvider) + if err != nil { + return nil, err + } + + pcf.stakingDataProviderAPI = stakingDataProviderAPI + argsEpochRewards := metachainEpochStart.RewardsCreatorProxyArgs{ BaseRewardsCreatorArgs: metachainEpochStart.BaseRewardsCreatorArgs{ ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), @@ -852,6 +892,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( OutportDataProvider: outportDataProvider, BlockProcessingCutoffHandler: blockProcessingCutoffhandler, ManagedPeersHolder: pcf.crypto.ManagedPeersHolder(), + SentSignaturesTracker: sentSignaturesTracker, } esdtOwnerAddress, err := pcf.coreData.AddressPubKeyConverter().Decode(pcf.systemSCConfig.ESDTSystemSCConfig.OwnerAddress) @@ -860,25 +901,79 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( "in processComponentsFactory.newMetaBlockProcessor", err) } + maxNodesChangeConfigProvider, err := notifier.NewNodesConfigProvider( + pcf.epochNotifier, + 
enableEpochs.MaxNodesChangeEnableEpoch, + ) + if err != nil { + return nil, err + } + + argsAuctionListDisplayer := metachainEpochStart.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachainEpochStart.NewTableDisplayer(), + ValidatorPubKeyConverter: pcf.coreData.ValidatorPubKeyConverter(), + AddressPubKeyConverter: pcf.coreData.AddressPubKeyConverter(), + AuctionConfig: pcf.systemSCConfig.SoftAuctionConfig, + Denomination: pcf.economicsConfig.GlobalSettings.Denomination, + } + auctionListDisplayer, err := metachainEpochStart.NewAuctionListDisplayer(argsAuctionListDisplayer) + if err != nil { + return nil, err + } + + argsAuctionListSelector := metachainEpochStart.AuctionListSelectorArgs{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: auctionListDisplayer, + SoftAuctionConfig: pcf.systemSCConfig.SoftAuctionConfig, + Denomination: pcf.economicsConfig.GlobalSettings.Denomination, + } + auctionListSelector, err := metachainEpochStart.NewAuctionListSelector(argsAuctionListSelector) + if err != nil { + return nil, err + } + + maxNodesChangeConfigProviderAPI, err := notifier.NewNodesConfigProviderAPI(pcf.epochNotifier, pcf.epochConfig.EnableEpochs) + if err != nil { + return nil, err + } + argsAuctionListSelectorAPI := metachainEpochStart.AuctionListSelectorArgs{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + StakingDataProvider: stakingDataProviderAPI, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProviderAPI, + SoftAuctionConfig: pcf.systemSCConfig.SoftAuctionConfig, + Denomination: pcf.economicsConfig.GlobalSettings.Denomination, + AuctionListDisplayHandler: factoryDisabled.NewDisabledAuctionListDisplayer(), + } + auctionListSelectorAPI, err := metachainEpochStart.NewAuctionListSelector(argsAuctionListSelectorAPI) + if err != nil { + return nil, err + } + + pcf.auctionListSelectorAPI = auctionListSelectorAPI + argsEpochSystemSC := metachainEpochStart.ArgsNewEpochStartSystemSCProcessing{ - SystemVM: systemVM, - UserAccountsDB: pcf.state.AccountsAdapter(), - PeerAccountsDB: pcf.state.PeerAccounts(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - StartRating: pcf.coreData.RatingsData().StartRating(), - ValidatorInfoCreator: validatorStatisticsProcessor, - EndOfEpochCallerAddress: vm.EndOfEpochAddress, - StakingSCAddress: vm.StakingSCAddress, - ChanceComputer: pcf.coreData.Rater(), - EpochNotifier: pcf.coreData.EpochNotifier(), - GenesisNodesConfig: pcf.coreData.GenesisNodesSetup(), - MaxNodesEnableConfig: enableEpochs.MaxNodesChangeEnableEpoch, - StakingDataProvider: stakingDataProvider, - NodesConfigProvider: pcf.nodesCoordinator, - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ESDTOwnerAddressBytes: esdtOwnerAddress, - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + SystemVM: systemVM, + UserAccountsDB: pcf.state.AccountsAdapter(), + PeerAccountsDB: pcf.state.PeerAccounts(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + StartRating: pcf.coreData.RatingsData().StartRating(), + ValidatorInfoCreator: validatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: pcf.coreData.Rater(), + EpochNotifier: pcf.coreData.EpochNotifier(), + GenesisNodesConfig: pcf.coreData.GenesisNodesSetup(), + StakingDataProvider: stakingDataProvider, + NodesConfigProvider: pcf.nodesCoordinator, + ShardCoordinator: 
pcf.bootstrapComponents.ShardCoordinator(), + ESDTOwnerAddressBytes: esdtOwnerAddress, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListSelector: auctionListSelector, } + epochStartSystemSCProcessor, err := metachainEpochStart.NewSystemSCProcessor(argsEpochSystemSC) if err != nil { return nil, err @@ -909,6 +1004,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( blockProcessorComponents := &blockProcessorAndVmFactories{ blockProcessor: metaProcessor, vmFactoryForProcessing: vmFactory, + epochSystemSCProcessor: epochStartSystemSCProcessor, } return blockProcessorComponents, nil @@ -1013,6 +1109,7 @@ func (pcf *processComponentsFactory) createVMFactoryShard( WasmVMChangeLocker: wasmVMChangeLocker, ESDTTransferParser: esdtTransferParser, Hasher: pcf.coreData.Hasher(), + PubKeyConverter: pcf.coreData.AddressPubKeyConverter(), } return shard.NewVMContainerFactory(argsNewVMFactory) @@ -1068,6 +1165,7 @@ func (pcf *processComponentsFactory) createVMFactoryMeta( ChanceComputer: pcf.coreData.Rater(), ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + NodesCoordinator: pcf.nodesCoordinator, } return metachain.NewVMContainerFactory(argsNewVMContainer) } diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index dd58982a791..4fb8f8c5d7d 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -1,6 +1,7 @@ package processing_test import ( + "fmt" "sync" "testing" @@ -20,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageManager "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" @@ -39,12 +41,12 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { _, err = pcf.Create() require.NoError(t, err) - bp, err := pcf.NewBlockProcessor( + bp, epochStartSCProc, err := pcf.NewBlockProcessor( &testscommon.RequestHandlerStub{}, - &mock.ForkDetectorStub{}, + &processMocks.ForkDetectorStub{}, &mock.EpochStartTriggerStub{}, &mock.BoostrapStorerStub{}, - &mock.ValidatorStatisticsProcessorStub{}, + &testscommon.ValidatorStatisticsProcessorStub{}, &mock.HeaderValidatorStub{}, &mock.BlockTrackerStub{}, &mock.PendingMiniBlocksHandlerStub{}, @@ -54,10 +56,13 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { &testscommon.ReceiptsRepositoryStub{}, &testscommon.BlockProcessingCutoffStub{}, &testscommon.MissingTrieNodesNotifierStub{}, + &testscommon.SentSignatureTrackerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) require.NoError(t, err) require.NotNil(t, bp) + require.Equal(t, "*disabled.epochStartSystemSCProcessor", fmt.Sprintf("%T", epochStartSCProc)) } func Test_newBlockProcessorCreatorForMeta(t *testing.T) { @@ -164,12 +169,12 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { _, err = pcf.Create() require.NoError(t, err) - bp, err := pcf.NewBlockProcessor( + bp, epochStartSCProc, err := pcf.NewBlockProcessor( 
&testscommon.RequestHandlerStub{}, - &mock.ForkDetectorStub{}, + &processMocks.ForkDetectorStub{}, &mock.EpochStartTriggerStub{}, &mock.BoostrapStorerStub{}, - &mock.ValidatorStatisticsProcessorStub{}, + &testscommon.ValidatorStatisticsProcessorStub{}, &mock.HeaderValidatorStub{}, &mock.BlockTrackerStub{}, &mock.PendingMiniBlocksHandlerStub{}, @@ -179,10 +184,13 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { &testscommon.ReceiptsRepositoryStub{}, &testscommon.BlockProcessingCutoffStub{}, &testscommon.MissingTrieNodesNotifierStub{}, + &testscommon.SentSignatureTrackerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) require.NoError(t, err) require.NotNil(t, bp) + require.Equal(t, "*metachain.systemSCProcessor", fmt.Sprintf("%T", epochStartSCProc)) } func createAccountAdapter( diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index 3187bd729b1..c82f01b3f6f 100644 --- a/factory/processing/export_test.go +++ b/factory/processing/export_test.go @@ -24,7 +24,9 @@ func (pcf *processComponentsFactory) NewBlockProcessor( receiptsRepository factory.ReceiptsRepository, blockProcessingCutoff cutoff.BlockProcessingCutoffHandler, missingTrieNodesNotifier common.MissingTrieNodesNotifier, -) (process.BlockProcessor, error) { + sentSignaturesTracker process.SentSignaturesTracker, + relayedV3TxProcessor process.RelayedTxV3Processor, +) (process.BlockProcessor, process.EpochStartSystemSCProcessor, error) { blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, forkDetector, @@ -40,15 +42,17 @@ func (pcf *processComponentsFactory) NewBlockProcessor( receiptsRepository, blockProcessingCutoff, missingTrieNodesNotifier, + sentSignaturesTracker, + relayedV3TxProcessor, ) if err != nil { - return nil, err + return nil, nil, err } - return blockProcessorComponents.blockProcessor, nil + return blockProcessorComponents.blockProcessor, blockProcessorComponents.epochSystemSCProcessor, nil } // CreateAPITransactionEvaluator - -func (pcf *processComponentsFactory) CreateAPITransactionEvaluator() (factory.TransactionEvaluator, process.VirtualMachinesContainerFactory, error) { - return pcf.createAPITransactionEvaluator() +func (pcf *processComponentsFactory) CreateAPITransactionEvaluator(relayedV3TxProcessor process.RelayedTxV3Processor) (factory.TransactionEvaluator, process.VirtualMachinesContainerFactory, error) { + return pcf.createAPITransactionEvaluator(relayedV3TxProcessor) } diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 3669e9a29c5..6f711a502ae 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -16,6 +16,9 @@ import ( dataBlock "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/receipt" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + vmcommonBuiltInFunctions "github.com/multiversx/mx-chain-vm-common-go/builtInFunctions" + nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" @@ -59,6 +62,7 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/process/sync" 
"github.com/multiversx/mx-chain-go/process/track" + "github.com/multiversx/mx-chain-go/process/transaction" "github.com/multiversx/mx-chain-go/process/transactionLog" "github.com/multiversx/mx-chain-go/process/txsSender" "github.com/multiversx/mx-chain-go/redundancy" @@ -76,8 +80,6 @@ import ( updateDisabled "github.com/multiversx/mx-chain-go/update/disabled" updateFactory "github.com/multiversx/mx-chain-go/update/factory" "github.com/multiversx/mx-chain-go/update/trigger" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - vmcommonBuiltInFunctions "github.com/multiversx/mx-chain-vm-common-go/builtInFunctions" ) // timeSpanForBadHeaders is the expiry time for an added block header hash @@ -130,6 +132,9 @@ type processComponents struct { esdtDataStorageForApi vmcommon.ESDTNFTStorageHandler accountsParser genesis.AccountsParser receiptsRepository mainFactory.ReceiptsRepository + sentSignaturesTracker process.SentSignaturesTracker + epochSystemSCProcessor process.EpochStartSystemSCProcessor + relayedTxV3Processor process.RelayedTxV3Processor } // ProcessComponentsFactoryArgs holds the arguments needed to create a process components factory @@ -139,6 +144,7 @@ type ProcessComponentsFactoryArgs struct { EpochConfig config.EpochConfig PrefConfigs config.Preferences ImportDBConfig config.ImportDbConfig + EconomicsConfig config.EconomicsConfig AccountsParser genesis.AccountsParser SmartContractParser genesis.InitialSmartContractParser GasSchedule core.GasScheduleNotifier @@ -161,6 +167,9 @@ type ProcessComponentsFactoryArgs struct { StatusComponents factory.StatusComponentsHolder StatusCoreComponents factory.StatusCoreComponentsHolder TxExecutionOrderHandler common.TxExecutionOrderHandler + + GenesisNonce uint64 + GenesisRound uint64 } type processComponentsFactory struct { @@ -169,6 +178,7 @@ type processComponentsFactory struct { epochConfig config.EpochConfig prefConfigs config.Preferences importDBConfig config.ImportDbConfig + economicsConfig config.EconomicsConfig accountsParser genesis.AccountsParser smartContractParser genesis.InitialSmartContractParser gasSchedule core.GasScheduleNotifier @@ -185,6 +195,8 @@ type processComponentsFactory struct { importHandler update.ImportHandler flagsConfig config.ContextFlagsConfig esdtNftStorage vmcommon.ESDTNFTStorageHandler + stakingDataProviderAPI peer.StakingDataProviderAPI + auctionListSelectorAPI epochStart.AuctionListSelector data factory.DataComponentsHolder coreData factory.CoreComponentsHolder @@ -195,6 +207,9 @@ type processComponentsFactory struct { statusComponents factory.StatusComponentsHolder statusCoreComponents factory.StatusCoreComponentsHolder txExecutionOrderHandler common.TxExecutionOrderHandler + + genesisNonce uint64 + genesisRound uint64 } // NewProcessComponentsFactory will return a new instance of processComponentsFactory @@ -209,6 +224,7 @@ func NewProcessComponentsFactory(args ProcessComponentsFactoryArgs) (*processCom epochConfig: args.EpochConfig, prefConfigs: args.PrefConfigs, importDBConfig: args.ImportDBConfig, + economicsConfig: args.EconomicsConfig, accountsParser: args.AccountsParser, smartContractParser: args.SmartContractParser, gasSchedule: args.GasSchedule, @@ -231,6 +247,9 @@ func NewProcessComponentsFactory(args ProcessComponentsFactoryArgs) (*processCom statusCoreComponents: args.StatusCoreComponents, flagsConfig: args.FlagsConfig, 
txExecutionOrderHandler: args.TxExecutionOrderHandler, + genesisNonce: args.GenesisNonce, + genesisRound: args.GenesisRound, + roundConfig: args.RoundConfig, }, nil } @@ -402,30 +421,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - startEpochNum := pcf.bootstrapComponents.EpochBootstrapParams().Epoch() - if startEpochNum == 0 { - err = pcf.indexGenesisBlocks(genesisBlocks, initialTxs, genesisAccounts) - if err != nil { - return nil, err - } - } - - cacheRefreshDuration := time.Duration(pcf.config.ValidatorStatistics.CacheRefreshIntervalInSec) * time.Second - argVSP := peer.ArgValidatorsProvider{ - NodesCoordinator: pcf.nodesCoordinator, - StartEpoch: startEpochNum, - EpochStartEventNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), - CacheRefreshIntervalDurationInSec: cacheRefreshDuration, - ValidatorStatistics: validatorStatisticsProcessor, - MaxRating: pcf.maxRating, - PubKeyConverter: pcf.coreData.ValidatorPubKeyConverter(), - } - - validatorsProvider, err := peer.NewValidatorsProvider(argVSP) - if err != nil { - return nil, err - } - epochStartTrigger, err := pcf.newEpochStartTrigger(requestHandler) if err != nil { return nil, err @@ -522,6 +517,16 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + argsRelayedTxV3Processor := transaction.ArgRelayedTxV3Processor{ + EconomicsFee: pcf.coreData.EconomicsData(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + MaxTransactionsAllowed: pcf.config.RelayedTransactionConfig.MaxTransactionsAllowed, + } + relayedTxV3Processor, err := transaction.NewRelayedTxV3Processor(argsRelayedTxV3Processor) + if err != nil { + return nil, err + } + interceptorContainerFactory, blackListHandler, err := pcf.newInterceptorContainerFactory( headerSigVerifier, pcf.bootstrapComponents.HeaderIntegrityVerifier(), @@ -531,6 +536,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { mainPeerShardMapper, fullArchivePeerShardMapper, hardforkTrigger, + relayedTxV3Processor, ) if err != nil { return nil, err @@ -606,6 +612,11 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + sentSignaturesTracker, err := track.NewSentSignaturesTracker(pcf.crypto.KeysHandler()) + if err != nil { + return nil, fmt.Errorf("%w when assembling components for the sent signatures tracker", err) + } + blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, forkDetector, @@ -621,11 +632,40 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { receiptsRepository, blockCutoffProcessingHandler, pcf.state.MissingTrieNodesNotifier(), + sentSignaturesTracker, + relayedTxV3Processor, ) if err != nil { return nil, err } + startEpochNum := pcf.bootstrapComponents.EpochBootstrapParams().Epoch() + if startEpochNum == 0 { + err = pcf.indexGenesisBlocks(genesisBlocks, initialTxs, genesisAccounts) + if err != nil { + return nil, err + } + } + + cacheRefreshDuration := time.Duration(pcf.config.ValidatorStatistics.CacheRefreshIntervalInSec) * time.Second + argVSP := peer.ArgValidatorsProvider{ + NodesCoordinator: pcf.nodesCoordinator, + StartEpoch: startEpochNum, + EpochStartEventNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), + CacheRefreshIntervalDurationInSec: cacheRefreshDuration, + ValidatorStatistics: validatorStatisticsProcessor, + MaxRating: pcf.maxRating, + ValidatorPubKeyConverter: pcf.coreData.ValidatorPubKeyConverter(), + AddressPubKeyConverter: 
pcf.coreData.AddressPubKeyConverter(), + AuctionListSelector: pcf.auctionListSelectorAPI, + StakingDataProvider: pcf.stakingDataProviderAPI, + } + + validatorsProvider, err := peer.NewValidatorsProvider(argVSP) + if err != nil { + return nil, err + } + conversionBase := 10 genesisNodePrice, ok := big.NewInt(0).SetString(pcf.systemSCConfig.StakingSystemSCConfig.GenesisNodePrice, conversionBase) if !ok { @@ -683,7 +723,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - apiTransactionEvaluator, vmFactoryForTxSimulate, err := pcf.createAPITransactionEvaluator() + apiTransactionEvaluator, vmFactoryForTxSimulate, err := pcf.createAPITransactionEvaluator(relayedTxV3Processor) if err != nil { return nil, fmt.Errorf("%w when assembling components for the transactions simulator processor", err) } @@ -727,6 +767,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { currentEpochProvider: currentEpochProvider, vmFactoryForTxSimulator: vmFactoryForTxSimulate, vmFactoryForProcessing: blockProcessorComponents.vmFactoryForProcessing, + epochSystemSCProcessor: blockProcessorComponents.epochSystemSCProcessor, scheduledTxsExecutionHandler: scheduledTxsExecutionHandler, txsSender: txsSenderWithAccumulator, hardforkTrigger: hardforkTrigger, @@ -734,11 +775,12 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { esdtDataStorageForApi: pcf.esdtNftStorage, accountsParser: pcf.accountsParser, receiptsRepository: receiptsRepository, + sentSignaturesTracker: sentSignaturesTracker, + relayedTxV3Processor: relayedTxV3Processor, }, nil } func (pcf *processComponentsFactory) newValidatorStatisticsProcessor() (process.ValidatorStatisticsProcessor, error) { - storageService := pcf.data.StorageService() var peerDataPool peer.DataPool = pcf.data.Datapool() @@ -802,21 +844,22 @@ func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochSt } argEpochStart := &shardchain.ArgsShardEpochStartTrigger{ - Marshalizer: pcf.coreData.InternalMarshalizer(), - Hasher: pcf.coreData.Hasher(), - HeaderValidator: headerValidator, - Uint64Converter: pcf.coreData.Uint64ByteSliceConverter(), - DataPool: pcf.data.Datapool(), - Storage: pcf.data.StorageService(), - RequestHandler: requestHandler, - Epoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), - EpochStartNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), - Validity: process.MetaBlockValidity, - Finality: process.BlockFinality, - PeerMiniBlocksSyncer: peerMiniBlockSyncer, - RoundHandler: pcf.coreData.RoundHandler(), - AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Hasher: pcf.coreData.Hasher(), + HeaderValidator: headerValidator, + Uint64Converter: pcf.coreData.Uint64ByteSliceConverter(), + DataPool: pcf.data.Datapool(), + Storage: pcf.data.StorageService(), + RequestHandler: requestHandler, + Epoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), + EpochStartNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), + Validity: process.MetaBlockValidity, + Finality: process.BlockFinality, + PeerMiniBlocksSyncer: peerMiniBlockSyncer, + RoundHandler: pcf.coreData.RoundHandler(), + AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + ExtraDelayForRequestBlockInfo: time.Duration(pcf.config.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds) * 
time.Millisecond, } return shardchain.NewEpochStartTrigger(argEpochStart) } @@ -872,13 +915,17 @@ func (pcf *processComponentsFactory) generateGenesisHeadersAndApplyInitialBalanc HardForkConfig: pcf.config.Hardfork, TrieStorageManagers: pcf.state.TrieStorageManagers(), SystemSCConfig: *pcf.systemSCConfig, - RoundConfig: &pcf.roundConfig, - EpochConfig: &pcf.epochConfig, + RoundConfig: pcf.roundConfig, + EpochConfig: pcf.epochConfig, + HeaderVersionConfigs: pcf.config.Versions, BlockSignKeyGen: pcf.crypto.BlockSignKeyGen(), HistoryRepository: pcf.historyRepo, GenesisNodePrice: genesisNodePrice, GenesisString: pcf.config.GeneralSettings.GenesisString, TxExecutionOrderHandler: pcf.txExecutionOrderHandler, + GenesisEpoch: pcf.config.EpochStartConfig.GenesisEpoch, + GenesisNonce: pcf.genesisNonce, + GenesisRound: pcf.genesisRound, } gbc, err := processGenesis.NewGenesisBlockCreator(arg) @@ -1357,23 +1404,24 @@ func (pcf *processComponentsFactory) newShardResolverContainerFactory( } resolversContainerFactoryArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - MainMessenger: pcf.network.NetworkMessenger(), - FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), - Store: pcf.data.StorageService(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - DataPools: pcf.data.Datapool(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - DataPacker: dataPacker, - TriesContainer: pcf.state.TriesContainer(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, - MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), - PayloadValidator: payloadValidator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + Store: pcf.data.StorageService(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + DataPools: pcf.data.Datapool(), + Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), + DataPacker: dataPacker, + TriesContainer: pcf.state.TriesContainer(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, + NumConcurrentResolvingTrieNodesJobs: pcf.config.Antiflood.NumConcurrentResolvingTrieNodesJobs, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, + MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), + PayloadValidator: payloadValidator, } resolversContainerFactory, err := resolverscontainer.NewShardResolversContainerFactory(resolversContainerFactoryArgs) if err != nil { @@ -1393,23 +1441,24 @@ func (pcf *processComponentsFactory) newMetaResolverContainerFactory( } resolversContainerFactoryArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - MainMessenger: pcf.network.NetworkMessenger(), - FullArchiveMessenger: 
pcf.network.FullArchiveNetworkMessenger(), - Store: pcf.data.StorageService(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - DataPools: pcf.data.Datapool(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - DataPacker: dataPacker, - TriesContainer: pcf.state.TriesContainer(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, - MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), - PayloadValidator: payloadValidator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + Store: pcf.data.StorageService(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + DataPools: pcf.data.Datapool(), + Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), + DataPacker: dataPacker, + TriesContainer: pcf.state.TriesContainer(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, + NumConcurrentResolvingTrieNodesJobs: pcf.config.Antiflood.NumConcurrentResolvingTrieNodesJobs, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, + MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), + PayloadValidator: payloadValidator, } return resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) @@ -1459,6 +1508,7 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( mainPeerShardMapper *networksharding.PeerShardMapper, fullArchivePeerShardMapper *networksharding.PeerShardMapper, hardforkTrigger factory.HardforkTrigger, + relayedTxV3Processor process.RelayedTxV3Processor, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { nodeOperationMode := common.NormalOperation if pcf.prefConfigs.Preferences.FullArchive { @@ -1477,6 +1527,7 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( fullArchivePeerShardMapper, hardforkTrigger, nodeOperationMode, + relayedTxV3Processor, ) } if shardCoordinator.SelfId() == core.MetachainShardId { @@ -1490,6 +1541,7 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( fullArchivePeerShardMapper, hardforkTrigger, nodeOperationMode, + relayedTxV3Processor, ) } @@ -1629,6 +1681,7 @@ func (pcf *processComponentsFactory) newShardInterceptorContainerFactory( fullArchivePeerShardMapper *networksharding.PeerShardMapper, hardforkTrigger factory.HardforkTrigger, nodeOperationMode common.NodeOperation, + relayedTxV3Processor process.RelayedTxV3Processor, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { headerBlackList := cache.NewTimeCache(timeSpanForBadHeaders) shardInterceptorsContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ @@ -1662,6 +1715,7 @@ func (pcf *processComponentsFactory) newShardInterceptorContainerFactory( FullArchivePeerShardMapper: fullArchivePeerShardMapper, HardforkTrigger: 
hardforkTrigger, NodeOperationMode: nodeOperationMode, + RelayedTxV3Processor: relayedTxV3Processor, } interceptorContainerFactory, err := interceptorscontainer.NewShardInterceptorsContainerFactory(shardInterceptorsContainerFactoryArgs) @@ -1682,6 +1736,7 @@ func (pcf *processComponentsFactory) newMetaInterceptorContainerFactory( fullArchivePeerShardMapper *networksharding.PeerShardMapper, hardforkTrigger factory.HardforkTrigger, nodeOperationMode common.NodeOperation, + relayedTxV3Processor process.RelayedTxV3Processor, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { headerBlackList := cache.NewTimeCache(timeSpanForBadHeaders) metaInterceptorsContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ @@ -1715,6 +1770,7 @@ func (pcf *processComponentsFactory) newMetaInterceptorContainerFactory( FullArchivePeerShardMapper: fullArchivePeerShardMapper, HardforkTrigger: hardforkTrigger, NodeOperationMode: nodeOperationMode, + RelayedTxV3Processor: relayedTxV3Processor, } interceptorContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorsContainerFactoryArgs) diff --git a/factory/processing/processComponentsHandler.go b/factory/processing/processComponentsHandler.go index b544ba901ef..94f21a5b8f2 100644 --- a/factory/processing/processComponentsHandler.go +++ b/factory/processing/processComponentsHandler.go @@ -55,7 +55,7 @@ func (m *managedProcessComponents) Create() error { return nil } -// Close will close all underlying sub-components +// Close will close all underlying subcomponents func (m *managedProcessComponents) Close() error { m.mutProcessComponents.Lock() defer m.mutProcessComponents.Unlock() @@ -174,6 +174,15 @@ func (m *managedProcessComponents) CheckSubcomponents() error { if check.IfNil(m.processComponents.esdtDataStorageForApi) { return errors.ErrNilESDTDataStorage } + if check.IfNil(m.processComponents.sentSignaturesTracker) { + return errors.ErrNilSentSignatureTracker + } + if check.IfNil(m.processComponents.epochSystemSCProcessor) { + return errors.ErrNilEpochSystemSCProcessor + } + if check.IfNil(m.processComponents.relayedTxV3Processor) { + return errors.ErrNilRelayedTxV3Processor + } return nil } @@ -658,6 +667,42 @@ func (m *managedProcessComponents) ReceiptsRepository() factory.ReceiptsReposito return m.processComponents.receiptsRepository } +// SentSignaturesTracker returns the signature tracker +func (m *managedProcessComponents) SentSignaturesTracker() process.SentSignaturesTracker { + m.mutProcessComponents.RLock() + defer m.mutProcessComponents.RUnlock() + + if m.processComponents == nil { + return nil + } + + return m.processComponents.sentSignaturesTracker +} + +// EpochSystemSCProcessor returns the epoch start system SC processor +func (m *managedProcessComponents) EpochSystemSCProcessor() process.EpochStartSystemSCProcessor { + m.mutProcessComponents.RLock() + defer m.mutProcessComponents.RUnlock() + + if m.processComponents == nil { + return nil + } + + return m.processComponents.epochSystemSCProcessor +} + +// RelayedTxV3Processor returns the relayed tx v3 processor +func (m *managedProcessComponents) RelayedTxV3Processor() process.RelayedTxV3Processor { + m.mutProcessComponents.RLock() + defer m.mutProcessComponents.RUnlock() + + if m.processComponents == nil { + return nil + } + + return m.processComponents.relayedTxV3Processor +} + // IsInterfaceNil returns true if the interface is nil func (m *managedProcessComponents) IsInterfaceNil() bool { return m == nil diff 
--git a/factory/processing/processComponentsHandler_test.go b/factory/processing/processComponentsHandler_test.go index 152b7637dc6..c999d25b041 100644 --- a/factory/processing/processComponentsHandler_test.go +++ b/factory/processing/processComponentsHandler_test.go @@ -92,6 +92,9 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.True(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) require.True(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) require.True(t, check.IfNil(managedProcessComponents.FullArchiveInterceptorsContainer())) + require.True(t, check.IfNil(managedProcessComponents.SentSignaturesTracker())) + require.True(t, check.IfNil(managedProcessComponents.EpochSystemSCProcessor())) + require.True(t, check.IfNil(managedProcessComponents.RelayedTxV3Processor())) err := managedProcessComponents.Create() require.NoError(t, err) @@ -135,6 +138,9 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.False(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) require.False(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) require.False(t, check.IfNil(managedProcessComponents.FullArchiveInterceptorsContainer())) + require.False(t, check.IfNil(managedProcessComponents.SentSignaturesTracker())) + require.False(t, check.IfNil(managedProcessComponents.EpochSystemSCProcessor())) + require.False(t, check.IfNil(managedProcessComponents.RelayedTxV3Processor())) require.Equal(t, factory.ProcessComponentsName, managedProcessComponents.String()) }) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index e44a9d71515..c4ae1e3dd9d 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -44,6 +44,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + nodesSetupMock "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" @@ -79,8 +80,19 @@ var ( func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFactoryArgs { args := processComp.ProcessComponentsFactoryArgs{ - Config: testscommon.GetGeneralConfig(), - EpochConfig: config.EpochConfig{}, + Config: testscommon.GetGeneralConfig(), + EpochConfig: config.EpochConfig{ + EnableEpochs: config.EnableEpochs{ + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 100, + NodesToShufflePerShard: 2, + }, + }, + }, + }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), PrefConfigs: config.Preferences{}, ImportDBConfig: config.ImportDbConfig{}, FlagsConfig: config.ContextFlagsConfig{ @@ -128,7 +140,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MaxVotingDelayPeriodInEpochs: 30, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ - GenesisNodePrice: "2500000000000000000000", + GenesisNodePrice: "2500", MinStakeValue: "1", UnJailValue: "1", MinStepValue: "1", @@ -139,6 +151,8 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MaxNumberOfNodesForStake: 10, 
ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + NodeLimitPercentage: 100.0, + StakeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -149,6 +163,12 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ImportStartHandler: &testscommon.ImportStartHandlerStub{}, HistoryRepo: &dblookupext.HistoryRepositoryStub{}, @@ -171,7 +191,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, AddrPubKeyConv: addrPubKeyConv, ValPubKeyConv: valPubKeyConv, - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &nodesSetupMock.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return 2 }, @@ -217,6 +237,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + KeysHandlerField: &testscommon.KeysHandlerStub{}, }, Network: &testsMocks.NetworkComponentsStub{ Messenger: &p2pmocks.MessengerStub{}, @@ -244,7 +265,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } - args.State = components.GetStateComponents(args.CoreData) + args.State = components.GetStateComponents(args.CoreData, args.StatusCoreComponents) return args } @@ -353,7 +374,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: nil, } pcf, err := processComp.NewProcessComponentsFactory(args) @@ -366,7 +387,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: nil, } @@ -380,7 +401,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: nil, @@ -395,7 +416,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: &testscommon.PubkeyConverterStub{}, @@ -411,7 +432,7 @@ func TestNewProcessComponentsFactory(t 
*testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: &testscommon.PubkeyConverterStub{}, @@ -732,7 +753,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args := createMockProcessComponentsFactoryArgs() coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) coreCompStub.GenesisNodesSetupCalled = func() sharding.GenesisNodesSetupHandler { - return &testscommon.NodesSetupStub{ + return &nodesSetupMock.NodesSetupStub{ AllInitialNodesCalled: func() []nodesCoordinator.GenesisNodeInfoHandler { return []nodesCoordinator.GenesisNodeInfoHandler{ &genesisMocks.GenesisNodeInfoHandlerMock{ diff --git a/factory/processing/txSimulatorProcessComponents.go b/factory/processing/txSimulatorProcessComponents.go index 2a5e8c5a7a2..21fe2ddc073 100644 --- a/factory/processing/txSimulatorProcessComponents.go +++ b/factory/processing/txSimulatorProcessComponents.go @@ -27,7 +27,7 @@ import ( datafield "github.com/multiversx/mx-chain-vm-common-go/parsers/dataField" ) -func (pcf *processComponentsFactory) createAPITransactionEvaluator() (factory.TransactionEvaluator, process.VirtualMachinesContainerFactory, error) { +func (pcf *processComponentsFactory) createAPITransactionEvaluator(relayedTxV3Processor process.RelayedTxV3Processor) (factory.TransactionEvaluator, process.VirtualMachinesContainerFactory, error) { simulationAccountsDB, err := transactionEvaluator.NewSimulationAccountsDB(pcf.state.AccountsAdapterAPI()) if err != nil { return nil, nil, err @@ -47,7 +47,7 @@ func (pcf *processComponentsFactory) createAPITransactionEvaluator() (factory.Tr return nil, nil, err } - txSimulatorProcessorArgs, vmContainerFactory, txTypeHandler, err := pcf.createArgsTxSimulatorProcessor(simulationAccountsDB, vmOutputCacher, txLogsProcessor) + txSimulatorProcessorArgs, vmContainerFactory, txTypeHandler, err := pcf.createArgsTxSimulatorProcessor(simulationAccountsDB, vmOutputCacher, txLogsProcessor, relayedTxV3Processor) if err != nil { return nil, nil, err } @@ -79,6 +79,7 @@ func (pcf *processComponentsFactory) createAPITransactionEvaluator() (factory.Tr Accounts: simulationAccountsDB, ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + BlockChain: pcf.data.Blockchain(), }) return apiTransactionEvaluator, vmContainerFactory, err @@ -88,12 +89,13 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessor( accountsAdapter state.AccountsAdapter, vmOutputCacher storage.Cacher, txLogsProcessor process.TransactionLogProcessor, + relayedTxV3Processor process.RelayedTxV3Processor, ) (transactionEvaluator.ArgsTxSimulator, process.VirtualMachinesContainerFactory, process.TxTypeHandler, error) { shardID := pcf.bootstrapComponents.ShardCoordinator().SelfId() if shardID == core.MetachainShardId { return pcf.createArgsTxSimulatorProcessorForMeta(accountsAdapter, vmOutputCacher, txLogsProcessor) } else { - return pcf.createArgsTxSimulatorProcessorShard(accountsAdapter, vmOutputCacher, txLogsProcessor) + return pcf.createArgsTxSimulatorProcessorShard(accountsAdapter, vmOutputCacher, txLogsProcessor, relayedTxV3Processor) } } @@ -141,6 +143,8 @@ func (pcf 
*processComponentsFactory) createArgsTxSimulatorProcessorForMeta( return args, nil, nil, err } + args.BlockChainHook = vmContainerFactory.BlockChainHookImpl() + vmContainer, err := vmContainerFactory.Create() if err != nil { return args, nil, nil, err @@ -169,29 +173,32 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorForMeta( return args, nil, nil, err } + failedTxLogsAccumulator := transactionLog.NewFailedTxLogsAccumulator() + scProcArgs := scrCommon.ArgsNewSmartContractProcessor{ - VmContainer: vmContainer, - ArgsParser: smartContract.NewArgumentParser(), - Hasher: pcf.coreData.Hasher(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - AccountsDB: accountsAdapter, - BlockChainHook: vmContainerFactory.BlockChainHookImpl(), - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - PubkeyConv: pcf.coreData.AddressPubKeyConverter(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ScrForwarder: scForwarder, - TxFeeHandler: &processDisabled.FeeHandler{}, - EconomicsFee: pcf.coreData.EconomicsData(), - TxTypeHandler: txTypeHandler, - GasHandler: gasHandler, - GasSchedule: pcf.gasSchedule, - TxLogsProcessor: txLogsProcessor, - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), - EnableRoundsHandler: pcf.coreData.EnableRoundsHandler(), - BadTxForwarder: badTxInterim, - VMOutputCacher: vmOutputCacher, - WasmVMChangeLocker: pcf.coreData.WasmVMChangeLocker(), - IsGenesisProcessing: false, + VmContainer: vmContainer, + ArgsParser: smartContract.NewArgumentParser(), + Hasher: pcf.coreData.Hasher(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + AccountsDB: accountsAdapter, + BlockChainHook: vmContainerFactory.BlockChainHookImpl(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScrForwarder: scForwarder, + TxFeeHandler: &processDisabled.FeeHandler{}, + EconomicsFee: pcf.coreData.EconomicsData(), + TxTypeHandler: txTypeHandler, + GasHandler: gasHandler, + GasSchedule: pcf.gasSchedule, + TxLogsProcessor: txLogsProcessor, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + EnableRoundsHandler: pcf.coreData.EnableRoundsHandler(), + BadTxForwarder: badTxInterim, + VMOutputCacher: vmOutputCacher, + WasmVMChangeLocker: pcf.coreData.WasmVMChangeLocker(), + IsGenesisProcessing: false, + FailedTxLogsAccumulator: failedTxLogsAccumulator, } scProcessor, err := smartContract.NewSmartContractProcessor(scProcArgs) @@ -246,6 +253,7 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorShard( accountsAdapter state.AccountsAdapter, vmOutputCacher storage.Cacher, txLogsProcessor process.TransactionLogProcessor, + relayedTxV3Processor process.RelayedTxV3Processor, ) (transactionEvaluator.ArgsTxSimulator, process.VirtualMachinesContainerFactory, process.TxTypeHandler, error) { args := transactionEvaluator.ArgsTxSimulator{} @@ -301,6 +309,8 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorShard( return args, nil, nil, err } + args.BlockChainHook = vmContainerFactory.BlockChainHookImpl() + err = builtInFuncFactory.SetPayableHandler(vmContainerFactory.BlockChainHookImpl()) if err != nil { return args, nil, nil, err @@ -341,29 +351,32 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorShard( argsParser := smartContract.NewArgumentParser() + failedTxLogsAccumulator := transactionLog.NewFailedTxLogsAccumulator() + scProcArgs := 
scrCommon.ArgsNewSmartContractProcessor{ - VmContainer: vmContainer, - ArgsParser: argsParser, - Hasher: pcf.coreData.Hasher(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - AccountsDB: accountsAdapter, - BlockChainHook: vmContainerFactory.BlockChainHookImpl(), - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - PubkeyConv: pcf.coreData.AddressPubKeyConverter(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ScrForwarder: scForwarder, - TxFeeHandler: &processDisabled.FeeHandler{}, - EconomicsFee: pcf.coreData.EconomicsData(), - TxTypeHandler: txTypeHandler, - GasHandler: gasHandler, - GasSchedule: pcf.gasSchedule, - TxLogsProcessor: txLogsProcessor, - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), - EnableRoundsHandler: pcf.coreData.EnableRoundsHandler(), - BadTxForwarder: badTxInterim, - VMOutputCacher: vmOutputCacher, - WasmVMChangeLocker: pcf.coreData.WasmVMChangeLocker(), - IsGenesisProcessing: false, + VmContainer: vmContainer, + ArgsParser: argsParser, + Hasher: pcf.coreData.Hasher(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + AccountsDB: accountsAdapter, + BlockChainHook: vmContainerFactory.BlockChainHookImpl(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScrForwarder: scForwarder, + TxFeeHandler: &processDisabled.FeeHandler{}, + EconomicsFee: pcf.coreData.EconomicsData(), + TxTypeHandler: txTypeHandler, + GasHandler: gasHandler, + GasSchedule: pcf.gasSchedule, + TxLogsProcessor: txLogsProcessor, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + EnableRoundsHandler: pcf.coreData.EnableRoundsHandler(), + BadTxForwarder: badTxInterim, + VMOutputCacher: vmOutputCacher, + WasmVMChangeLocker: pcf.coreData.WasmVMChangeLocker(), + IsGenesisProcessing: false, + FailedTxLogsAccumulator: failedTxLogsAccumulator, } scProcessor, err := smartContract.NewSmartContractProcessor(scProcArgs) @@ -372,25 +385,27 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorShard( } argsTxProcessor := transaction.ArgsNewTxProcessor{ - Accounts: accountsAdapter, - Hasher: pcf.coreData.Hasher(), - PubkeyConv: pcf.coreData.AddressPubKeyConverter(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - SignMarshalizer: pcf.coreData.TxMarshalizer(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ScProcessor: scProcessor, - TxFeeHandler: txFeeHandler, - TxTypeHandler: txTypeHandler, - EconomicsFee: pcf.coreData.EconomicsData(), - ReceiptForwarder: receiptTxInterim, - BadTxForwarder: badTxInterim, - ArgsParser: argsParser, - ScrForwarder: scForwarder, - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), - EnableRoundsHandler: pcf.coreData.EnableRoundsHandler(), - TxVersionChecker: pcf.coreData.TxVersionChecker(), - GuardianChecker: pcf.bootstrapComponents.GuardedAccountHandler(), - TxLogsProcessor: txLogsProcessor, + Accounts: accountsAdapter, + Hasher: pcf.coreData.Hasher(), + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + SignMarshalizer: pcf.coreData.TxMarshalizer(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScProcessor: scProcessor, + TxFeeHandler: txFeeHandler, + TxTypeHandler: txTypeHandler, + EconomicsFee: pcf.coreData.EconomicsData(), + ReceiptForwarder: receiptTxInterim, + BadTxForwarder: badTxInterim, + ArgsParser: argsParser, + ScrForwarder: scForwarder, + 
EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + EnableRoundsHandler: pcf.coreData.EnableRoundsHandler(), + TxVersionChecker: pcf.coreData.TxVersionChecker(), + GuardianChecker: pcf.bootstrapComponents.GuardedAccountHandler(), + TxLogsProcessor: txLogsProcessor, + RelayedTxV3Processor: relayedTxV3Processor, + FailedTxLogsAccumulator: failedTxLogsAccumulator, } txProcessor, err := transaction.NewTxProcessor(argsTxProcessor) diff --git a/factory/processing/txSimulatorProcessComponents_test.go b/factory/processing/txSimulatorProcessComponents_test.go index aad848600d8..37944768bfe 100644 --- a/factory/processing/txSimulatorProcessComponents_test.go +++ b/factory/processing/txSimulatorProcessComponents_test.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-go/factory/processing" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" "github.com/stretchr/testify/assert" ) @@ -26,7 +27,7 @@ func TestManagedProcessComponents_createAPITransactionEvaluator(t *testing.T) { processArgs.Config.VMOutputCacher.Type = "invalid" pcf, _ := processing.NewProcessComponentsFactory(processArgs) - apiTransactionEvaluator, vmContainerFactory, err := pcf.CreateAPITransactionEvaluator() + apiTransactionEvaluator, vmContainerFactory, err := pcf.CreateAPITransactionEvaluator(&processMocks.RelayedTxV3ProcessorMock{}) assert.NotNil(t, err) assert.True(t, check.IfNil(apiTransactionEvaluator)) assert.True(t, check.IfNil(vmContainerFactory)) @@ -36,7 +37,7 @@ func TestManagedProcessComponents_createAPITransactionEvaluator(t *testing.T) { processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinatorForShardID2) pcf, _ := processing.NewProcessComponentsFactory(processArgs) - apiTransactionEvaluator, vmContainerFactory, err := pcf.CreateAPITransactionEvaluator() + apiTransactionEvaluator, vmContainerFactory, err := pcf.CreateAPITransactionEvaluator(&processMocks.RelayedTxV3ProcessorMock{}) assert.Nil(t, err) assert.False(t, check.IfNil(apiTransactionEvaluator)) assert.False(t, check.IfNil(vmContainerFactory)) @@ -45,7 +46,7 @@ func TestManagedProcessComponents_createAPITransactionEvaluator(t *testing.T) { processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinatorForMetachain) pcf, _ := processing.NewProcessComponentsFactory(processArgs) - apiTransactionEvaluator, vmContainerFactory, err := pcf.CreateAPITransactionEvaluator() + apiTransactionEvaluator, vmContainerFactory, err := pcf.CreateAPITransactionEvaluator(&processMocks.RelayedTxV3ProcessorMock{}) assert.Nil(t, err) assert.False(t, check.IfNil(apiTransactionEvaluator)) assert.False(t, check.IfNil(vmContainerFactory)) diff --git a/factory/state/stateComponentsHandler_test.go b/factory/state/stateComponentsHandler_test.go index ba552ed416a..e73600180ff 100644 --- a/factory/state/stateComponentsHandler_test.go +++ b/factory/state/stateComponentsHandler_test.go @@ -27,7 +27,7 @@ func TestNewManagedStateComponents(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) 
require.NoError(t, err) @@ -42,7 +42,7 @@ func TestManagedStateComponents_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, err) @@ -56,7 +56,7 @@ func TestManagedStateComponents_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, err) @@ -87,7 +87,7 @@ func TestManagedStateComponents_Close(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, managedStateComponents.Close()) @@ -102,7 +102,7 @@ func TestManagedStateComponents_CheckSubcomponents(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) err := managedStateComponents.CheckSubcomponents() @@ -121,7 +121,7 @@ func TestManagedStateComponents_Setters(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) err := managedStateComponents.Create() @@ -153,7 +153,7 @@ func TestManagedStateComponents_IsInterfaceNil(t *testing.T) { require.True(t, managedStateComponents.IsInterfaceNil()) coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ = stateComp.NewManagedStateComponents(stateComponentsFactory) require.False(t, managedStateComponents.IsInterfaceNil()) diff --git a/factory/state/stateComponents_test.go b/factory/state/stateComponents_test.go index 177407226d8..bf5068e8dd7 100644 --- a/factory/state/stateComponents_test.go +++ b/factory/state/stateComponents_test.go @@ -20,7 +20,7 @@ func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := 
componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) args.Core = nil scf, err := stateComp.NewStateComponentsFactory(args) @@ -31,7 +31,7 @@ func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) args.StatusCore = nil scf, err := stateComp.NewStateComponentsFactory(args) @@ -42,7 +42,7 @@ func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) scf, err := stateComp.NewStateComponentsFactory(args) require.NoError(t, err) @@ -57,7 +57,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { return nil @@ -73,7 +73,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) args.Config.EvictionWaitingList.RootHashesSize = 0 scf, _ := stateComp.NewStateComponentsFactory(args) @@ -85,7 +85,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) cnt := 0 @@ -107,7 +107,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) cnt := 0 @@ -129,7 +129,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) scf, _ := stateComp.NewStateComponentsFactory(args) sc, err := scf.Create() @@ -143,7 +143,7 @@ func TestStateComponents_Close(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) scf, _ := stateComp.NewStateComponentsFactory(args) sc, err := scf.Create() diff --git a/factory/status/statusComponentsHandler_test.go b/factory/status/statusComponentsHandler_test.go index ee81a353e31..c7252cbf6de 100644 --- a/factory/status/statusComponentsHandler_test.go +++ 
b/factory/status/statusComponentsHandler_test.go @@ -16,18 +16,14 @@ import ( ) func TestNewManagedStatusComponents(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("nil factory should error", func(t *testing.T) { - t.Parallel() - managedStatusComponents, err := statusComp.NewManagedStatusComponents(nil) require.Equal(t, errorsMx.ErrNilStatusComponentsFactory, err) require.Nil(t, managedStatusComponents) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) require.Nil(t, err) managedStatusComponents, err := statusComp.NewManagedStatusComponents(scf) @@ -37,11 +33,9 @@ func TestNewManagedStatusComponents(t *testing.T) { } func TestManagedStatusComponents_Create(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("invalid params should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.StatusCoreComponents = &factoryMocks.StatusCoreComponentsStub{ AppStatusHandlerField: nil, @@ -56,8 +50,6 @@ func TestManagedStatusComponents_Create(t *testing.T) { require.Error(t, err) }) t.Run("should work with getters", func(t *testing.T) { - t.Parallel() - scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) require.Nil(t, err) managedStatusComponents, err := statusComp.NewManagedStatusComponents(scf) @@ -78,7 +70,7 @@ func TestManagedStatusComponents_Create(t *testing.T) { } func TestManagedStatusComponents_Close(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) @@ -96,7 +88,7 @@ func TestManagedStatusComponents_Close(t *testing.T) { } func TestManagedStatusComponents_CheckSubcomponents(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) @@ -112,7 +104,7 @@ func TestManagedStatusComponents_CheckSubcomponents(t *testing.T) { } func TestManagedStatusComponents_SetForkDetector(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) @@ -126,11 +118,9 @@ func TestManagedStatusComponents_SetForkDetector(t *testing.T) { } func TestManagedStatusComponents_StartPolling(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("NewAppStatusPolling fails should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.Config.GeneralSettings.StatusPollingIntervalSec = 0 scf, _ := statusComp.NewStatusComponentsFactory(args) @@ -142,8 +132,6 @@ func TestManagedStatusComponents_StartPolling(t *testing.T) { require.Equal(t, errorsMx.ErrStatusPollingInit, err) }) t.Run("RegisterPollingFunc fails should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.Config.GeneralSettings.StatusPollingIntervalSec = 0 scf, _ := statusComp.NewStatusComponentsFactory(args) @@ -155,8 +143,6 @@ func 
TestManagedStatusComponents_StartPolling(t *testing.T) { require.Equal(t, errorsMx.ErrStatusPollingInit, err) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) err := managedStatusComponents.Create() @@ -168,7 +154,7 @@ func TestManagedStatusComponents_StartPolling(t *testing.T) { } func TestComputeNumConnectedPeers(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("main network", testComputeNumConnectedPeers("")) t.Run("full archive network", testComputeNumConnectedPeers(common.FullArchiveMetricSuffix)) @@ -176,8 +162,6 @@ func TestComputeNumConnectedPeers(t *testing.T) { func testComputeNumConnectedPeers(suffix string) func(t *testing.T) { return func(t *testing.T) { - t.Parallel() - netMes := &p2pmocks.MessengerStub{ ConnectedAddressesCalled: func() []string { return []string{"addr1", "addr2", "addr3"} @@ -195,7 +179,7 @@ func testComputeNumConnectedPeers(suffix string) func(t *testing.T) { } func TestComputeConnectedPeers(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("main network", testComputeConnectedPeers("")) t.Run("full archive network", testComputeConnectedPeers(common.FullArchiveMetricSuffix)) @@ -203,8 +187,6 @@ func TestComputeConnectedPeers(t *testing.T) { func testComputeConnectedPeers(suffix string) func(t *testing.T) { return func(t *testing.T) { - t.Parallel() - netMes := &p2pmocks.MessengerStub{ GetConnectedPeersInfoCalled: func() *p2p.ConnectedPeersInfo { return &p2p.ConnectedPeersInfo{ @@ -294,7 +276,7 @@ func testComputeConnectedPeers(suffix string) func(t *testing.T) { } func TestManagedStatusComponents_IsInterfaceNil(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components managedStatusComponents, _ := statusComp.NewManagedStatusComponents(nil) require.True(t, managedStatusComponents.IsInterfaceNil()) diff --git a/factory/status/statusComponents_test.go b/factory/status/statusComponents_test.go index 35c7041d844..2b7c3e59379 100644 --- a/factory/status/statusComponents_test.go +++ b/factory/status/statusComponents_test.go @@ -15,6 +15,7 @@ import ( componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/require" @@ -45,7 +46,7 @@ func createMockStatusComponentsFactoryArgs() statusComp.StatusComponentsFactoryA NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, EpochStartNotifier: &mock.EpochStartNotifierStub{}, CoreComponents: &mock.CoreComponentsMock{ - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return 1000 }, @@ -66,11 +67,9 @@ func createMockStatusComponentsFactoryArgs() statusComp.StatusComponentsFactoryA } func TestNewStatusComponentsFactory(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("nil CoreComponents should error", func(t 
*testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CoreComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -78,8 +77,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilCoreComponentsHolder, err) }) t.Run("CoreComponents with nil GenesisNodesSetup should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CoreComponents = &mock.CoreComponentsMock{ NodesConfig: nil, @@ -89,8 +86,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilGenesisNodesSetupHandler, err) }) t.Run("nil NetworkComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.NetworkComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -98,8 +93,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilNetworkComponentsHolder, err) }) t.Run("nil ShardCoordinator should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.ShardCoordinator = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -107,8 +100,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilShardCoordinator, err) }) t.Run("nil NodesCoordinator should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.NodesCoordinator = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -116,8 +107,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilNodesCoordinator, err) }) t.Run("nil EpochStartNotifier should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.EpochStartNotifier = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -125,8 +114,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilEpochStartNotifier, err) }) t.Run("nil StatusCoreComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.StatusCoreComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -134,8 +121,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilStatusCoreComponents, err) }) t.Run("nil CryptoComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CryptoComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -143,8 +128,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilCryptoComponents, err) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) require.NotNil(t, scf) require.NoError(t, err) @@ -152,11 +135,11 @@ func TestNewStatusComponentsFactory(t *testing.T) { } func TestStatusComponentsFactory_Create(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } t.Run("NewSoftwareVersionFactory fails should return error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.StatusCoreComponents = &factory.StatusCoreComponentsStub{ AppStatusHandlerField: nil, // make NewSoftwareVersionFactory fail @@ -169,8 +152,6 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("softwareVersionCheckerFactory.Create 
fails should return error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.Config.SoftwareVersionConfig.PollingIntervalInMinutes = 0 scf, _ := statusComp.NewStatusComponentsFactory(args) @@ -181,11 +162,9 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("invalid round duration should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CoreComponents = &mock.CoreComponentsMock{ - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return 0 }, @@ -199,8 +178,6 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("makeWebSocketDriverArgs fails due to invalid marshaller type should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.ExternalConfig.HostDriversConfig[0].Enabled = true args.ExternalConfig.HostDriversConfig[0].MarshallerType = "invalid type" @@ -212,8 +189,6 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) shardCoordinator.SelfIDCalled = func() uint32 { return core.MetachainShardId // coverage @@ -232,7 +207,7 @@ func TestStatusComponentsFactory_Create(t *testing.T) { } func TestStatusComponentsFactory_epochStartEventHandler(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components args := createMockStatusComponentsFactoryArgs() args.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ @@ -252,7 +227,7 @@ func TestStatusComponentsFactory_epochStartEventHandler(t *testing.T) { } func TestStatusComponentsFactory_IsInterfaceNil(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components args := createMockStatusComponentsFactoryArgs() args.CoreComponents = nil @@ -264,7 +239,7 @@ func TestStatusComponentsFactory_IsInterfaceNil(t *testing.T) { } func TestStatusComponents_Close(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) cc, err := scf.Create() @@ -275,7 +250,7 @@ func TestStatusComponents_Close(t *testing.T) { } func TestMakeHostDriversArgs(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components args := createMockStatusComponentsFactoryArgs() args.ExternalConfig.HostDriversConfig = []config.HostDriversConfig{ diff --git a/genesis/interface.go b/genesis/interface.go index 1a618a44efe..e58708a236f 100644 --- a/genesis/interface.go +++ b/genesis/interface.go @@ -84,7 +84,7 @@ type InitialSmartContractHandler interface { } // InitialSmartContractParser contains the parsed genesis initial smart contracts -//json file and has some functionality regarding processed data +// json file and has some functionality regarding processed data type InitialSmartContractParser interface { InitialSmartContractsSplitOnOwnersShards(shardCoordinator sharding.Coordinator) (map[uint32][]InitialSmartContractHandler, error) GetDeployedSCAddresses(scType string) (map[string]struct{}, error) @@ -115,3 +115,9 @@ type DeployProcessor interface { Deploy(sc InitialSmartContractHandler) ([][]byte, error) IsInterfaceNil() bool } + +// VersionedHeaderFactory creates versioned headers +type VersionedHeaderFactory interface { + 
Create(epoch uint32) data.HeaderHandler
+   IsInterfaceNil() bool
+}
diff --git a/genesis/mock/coreComponentsMock.go b/genesis/mock/coreComponentsMock.go
index fb0907ef8a0..e44dd801243 100644
--- a/genesis/mock/coreComponentsMock.go
+++ b/genesis/mock/coreComponentsMock.go
@@ -22,6 +22,12 @@ type CoreComponentsMock struct {
    StatHandler              core.AppStatusHandler
    EnableEpochsHandlerField common.EnableEpochsHandler
    TxVersionCheck           process.TxVersionCheckerHandler
+   EconomicsDataField       process.EconomicsDataHandler
+}
+
+// EconomicsData -
+func (ccm *CoreComponentsMock) EconomicsData() process.EconomicsDataHandler {
+   return ccm.EconomicsDataField
 }

 // InternalMarshalizer -
diff --git a/genesis/process/argGenesisBlockCreator.go b/genesis/process/argGenesisBlockCreator.go
index e4374b7f6f0..685e356f31b 100644
--- a/genesis/process/argGenesisBlockCreator.go
+++ b/genesis/process/argGenesisBlockCreator.go
@@ -29,6 +29,7 @@ type coreComponentsHandler interface {
    TxVersionChecker() process.TxVersionCheckerHandler
    ChainID() string
    EnableEpochsHandler() common.EnableEpochsHandler
+   EconomicsData() process.EconomicsDataHandler
    IsInterfaceNil() bool
 }

@@ -44,7 +45,10 @@ type dataComponentsHandler interface {
 // ArgsGenesisBlockCreator holds the arguments which are needed to create a genesis block
 type ArgsGenesisBlockCreator struct {
    GenesisTime   uint64
+   GenesisNonce  uint64
+   GenesisRound  uint64
    StartEpochNum uint32
+   GenesisEpoch  uint32
    Data          dataComponentsHandler
    Core          coreComponentsHandler
    Accounts      state.AccountsAdapter
@@ -60,8 +64,9 @@ type ArgsGenesisBlockCreator struct {
    HardForkConfig       config.HardforkConfig
    TrieStorageManagers  map[string]common.StorageManager
    SystemSCConfig       config.SystemSmartContractsConfig
-   RoundConfig          *config.RoundConfig
-   EpochConfig          *config.EpochConfig
+   RoundConfig          config.RoundConfig
+   EpochConfig          config.EpochConfig
+   HeaderVersionConfigs config.VersionsConfig
    WorkingDir           string
    BlockSignKeyGen      crypto.KeyGenerator
    HistoryRepository    dblookupext.HistoryRepository
@@ -69,6 +74,8 @@ type ArgsGenesisBlockCreator struct {
    GenesisNodePrice *big.Int
    GenesisString    string
+
+   // created components
-   importHandler update.ImportHandler
+   importHandler          update.ImportHandler
+   versionedHeaderFactory genesis.VersionedHeaderFactory
 }
diff --git a/genesis/process/disabled/disabled_test.go b/genesis/process/disabled/disabled_test.go
index 746ac02b57f..487a17c5af3 100644
--- a/genesis/process/disabled/disabled_test.go
+++ b/genesis/process/disabled/disabled_test.go
@@ -294,7 +294,7 @@ func TestSimpleNFTStorage(t *testing.T) {
    require.Equal(t, &esdt.ESDigitalToken{Value: big.NewInt(0)}, token)
    require.True(t, ok)
    require.Nil(t, err)
-   require.Nil(t, handler.SaveNFTMetaDataToSystemAccount(nil))
+   require.Nil(t, handler.SaveNFTMetaData(nil))
    require.False(t, handler.IsInterfaceNil())
 }
diff --git a/genesis/process/disabled/feeHandler.go b/genesis/process/disabled/feeHandler.go
index 1fc34bbc2b5..f81e7e978eb 100644
--- a/genesis/process/disabled/feeHandler.go
+++ b/genesis/process/disabled/feeHandler.go
@@ -183,6 +183,11 @@ func (fh *FeeHandler) ComputeTxFeeBasedOnGasUsedInEpoch(tx data.TransactionWithF
    return big.NewInt(0)
 }

+// ComputeRelayedTxFees returns 0 and 0
+func (fh *FeeHandler) ComputeRelayedTxFees(_ data.TransactionWithFeeHandler) (*big.Int, *big.Int, error) {
+   return big.NewInt(0), big.NewInt(0), nil
+}
+
 // IsInterfaceNil returns true if there is no value under the interface
 func (fh *FeeHandler) IsInterfaceNil() bool {
    return fh == nil
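For context, the genesis.VersionedHeaderFactory interface introduced in genesis/interface.go above only requires producing a header for a given epoch. A minimal sketch of a conforming implementation (illustrative only: the type name is an assumption; it mirrors the VersionedHeaderFactoryStub used in the test changes further below, which returns a plain shard header for every epoch):

package example

import (
    "github.com/multiversx/mx-chain-core-go/data"
    "github.com/multiversx/mx-chain-core-go/data/block"
)

// fixedVersionHeaderFactory is a hypothetical, minimal genesis.VersionedHeaderFactory:
// it ignores the requested epoch and always returns an empty shard header.
type fixedVersionHeaderFactory struct{}

// Create returns a new shard header regardless of the requested epoch
func (f *fixedVersionHeaderFactory) Create(_ uint32) data.HeaderHandler {
    return &block.Header{}
}

// IsInterfaceNil returns true if there is no value under the interface
func (f *fixedVersionHeaderFactory) IsInterfaceNil() bool {
    return f == nil
}

The production wiring added below in genesisBlockCreator.go (createVersionedHeaderFactory) instead builds a HeaderVersionHandler from HeaderVersionConfigs, so the header version produced at genesis follows the per-epoch configuration.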
diff --git a/genesis/process/disabled/nodesCoordinator.go b/genesis/process/disabled/nodesCoordinator.go
new file mode 100644
index 00000000000..610230dd56f
--- /dev/null
+++ b/genesis/process/disabled/nodesCoordinator.go
@@ -0,0 +1,15 @@
+package disabled
+
+// NodesCoordinator implements the NodesCoordinator interface, it does nothing as it is disabled
+type NodesCoordinator struct {
+}
+
+// GetNumTotalEligible -
+func (n *NodesCoordinator) GetNumTotalEligible() uint64 {
+   return 1600
+}
+
+// IsInterfaceNil -
+func (n *NodesCoordinator) IsInterfaceNil() bool {
+   return n == nil
+}
diff --git a/genesis/process/disabled/simpleNFTStorageHandler.go b/genesis/process/disabled/simpleNFTStorageHandler.go
index 46534d07b56..d954a4fdd15 100644
--- a/genesis/process/disabled/simpleNFTStorageHandler.go
+++ b/genesis/process/disabled/simpleNFTStorageHandler.go
@@ -17,8 +17,8 @@ func (s *SimpleNFTStorage) GetESDTNFTTokenOnDestination(_ vmcommon.UserAccountHa
    return &esdt.ESDigitalToken{Value: big.NewInt(0)}, true, nil
 }

-// SaveNFTMetaDataToSystemAccount is disabled
-func (s *SimpleNFTStorage) SaveNFTMetaDataToSystemAccount(_ data.TransactionHandler) error {
+// SaveNFTMetaData is disabled
+func (s *SimpleNFTStorage) SaveNFTMetaData(_ data.TransactionHandler) error {
    return nil
 }
diff --git a/genesis/process/genesisBlockCreator.go b/genesis/process/genesisBlockCreator.go
index 2e9b14d7db3..3f5e559888f 100644
--- a/genesis/process/genesisBlockCreator.go
+++ b/genesis/process/genesisBlockCreator.go
@@ -16,6 +16,7 @@ import (
    "github.com/multiversx/mx-chain-go/config"
    "github.com/multiversx/mx-chain-go/dataRetriever"
    "github.com/multiversx/mx-chain-go/dataRetriever/blockchain"
+   factoryBlock "github.com/multiversx/mx-chain-go/factory/block"
    "github.com/multiversx/mx-chain-go/genesis"
    "github.com/multiversx/mx-chain-go/genesis/process/disabled"
    "github.com/multiversx/mx-chain-go/genesis/process/intermediate"
@@ -82,7 +83,7 @@ func getGenesisBlocksRoundNonceEpoch(arg ArgsGenesisBlockCreator) (uint64, uint6
    if arg.HardForkConfig.AfterHardFork {
        return arg.HardForkConfig.StartRound, arg.HardForkConfig.StartNonce, arg.HardForkConfig.StartEpoch
    }
-   return 0, 0, 0
+   return arg.GenesisRound, arg.GenesisNonce, arg.GenesisEpoch
 }

 func (gbc *genesisBlockCreator) createHardForkImportHandler() error {
@@ -131,8 +132,7 @@ func createStorer(storageConfig config.StorageConfig, folder string) (storage.St
    dbConfig := factory.GetDBFromConfig(storageConfig.DB)
    dbConfig.FilePath = path.Join(folder, storageConfig.DB.FilePath)

-   dbConfigHandler := factory.NewDBConfigHandler(storageConfig.DB)
-   persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler)
+   persisterFactory, err := factory.NewPersisterFactory(storageConfig.DB)
    if err != nil {
        return nil, err
    }
@@ -195,12 +195,6 @@ func checkArgumentsForBlockCreator(arg ArgsGenesisBlockCreator) error {
    if arg.TrieStorageManagers == nil {
        return genesis.ErrNilTrieStorageManager
    }
-   if arg.EpochConfig == nil {
-       return genesis.ErrNilEpochConfig
-   }
-   if arg.RoundConfig == nil {
-       return genesis.ErrNilRoundConfig
-   }
    if check.IfNil(arg.HistoryRepository) {
        return process.ErrNilHistoryRepository
    }
@@ -212,7 +206,7 @@ func checkArgumentsForBlockCreator(arg ArgsGenesisBlockCreator) error {
 }

 func mustDoGenesisProcess(arg ArgsGenesisBlockCreator) bool {
-   genesisEpoch := uint32(0)
+   genesisEpoch := arg.GenesisEpoch
    if arg.HardForkConfig.AfterHardFork {
        genesisEpoch = arg.HardForkConfig.StartEpoch
    }
@@ -225,7 +219,7 @@ func mustDoGenesisProcess(arg ArgsGenesisBlockCreator) bool {
    }

 func (gbc *genesisBlockCreator) createEmptyGenesisBlocks() (map[uint32]data.HeaderHandler, error) {
-   err := gbc.computeDNSAddresses(createGenesisConfig())
+   err := gbc.computeDNSAddresses(createGenesisConfig(gbc.arg.EpochConfig.EnableEpochs))
    if err != nil {
        return nil, err
    }
@@ -486,12 +480,17 @@ func (gbc *genesisBlockCreator) getNewArgForShard(shardID uint32) (ArgsGenesisBl
    var err error
    isCurrentShard := shardID == gbc.arg.ShardCoordinator.SelfId()
+   newArgument := gbc.arg // copy the arguments
+   newArgument.versionedHeaderFactory, err = gbc.createVersionedHeaderFactory()
+   if err != nil {
+       return ArgsGenesisBlockCreator{}, fmt.Errorf("'%w' while generating a VersionedHeaderFactory instance for shard %d",
+           err, shardID)
+   }
+
    if isCurrentShard {
-       newArgument := gbc.arg // copy the arguments
        newArgument.Data = newArgument.Data.Clone().(dataComponentsHandler)
        return newArgument, nil
    }

-   newArgument := gbc.arg // copy the arguments

    argsAccCreator := factoryState.ArgsAccountCreator{
        Hasher:   newArgument.Core.Hasher(),
@@ -530,6 +529,25 @@ func (gbc *genesisBlockCreator) getNewArgForShard(shardID uint32) (ArgsGenesisBl
    return newArgument, err
 }

+func (gbc *genesisBlockCreator) createVersionedHeaderFactory() (genesis.VersionedHeaderFactory, error) {
+   cacheConfig := factory.GetCacherFromConfig(gbc.arg.HeaderVersionConfigs.Cache)
+   cache, err := storageunit.NewCache(cacheConfig)
+   if err != nil {
+       return nil, err
+   }
+
+   headerVersionHandler, err := factoryBlock.NewHeaderVersionHandler(
+       gbc.arg.HeaderVersionConfigs.VersionsByEpochs,
+       gbc.arg.HeaderVersionConfigs.DefaultVersion,
+       cache,
+   )
+   if err != nil {
+       return nil, err
+   }
+
+   return factoryBlock.NewShardHeaderFactory(headerVersionHandler)
+}
+
 func (gbc *genesisBlockCreator) saveGenesisBlock(header data.HeaderHandler) error {
    blockBuff, err := gbc.arg.Core.InternalMarshalizer().Marshal(header)
    if err != nil {
diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go
index e2df7d4c6c6..981af811c43 100644
--- a/genesis/process/genesisBlockCreator_test.go
+++ b/genesis/process/genesisBlockCreator_test.go
@@ -1,7 +1,5 @@
 //go:build !race

-// TODO reinstate test after Wasm VM pointer fix
-
 package process

 import (
@@ -13,6 +11,8 @@ import (
    "testing"

    "github.com/multiversx/mx-chain-core-go/core"
+   "github.com/multiversx/mx-chain-core-go/data"
+   "github.com/multiversx/mx-chain-core-go/data/block"
    "github.com/multiversx/mx-chain-go/common"
    "github.com/multiversx/mx-chain-go/config"
    "github.com/multiversx/mx-chain-go/dataRetriever"
@@ -76,6 +76,7 @@ func createMockArgument(
            TxVersionCheck:           &testscommon.TxVersionCheckerStub{},
            MinTxVersion:             1,
            EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{},
+           EconomicsDataField:       &economicsmocks.EconomicsHandlerMock{},
        },
        Data: &mock.DataComponentsMock{
            Storage: &storageCommon.ChainStorerStub{
@@ -92,6 +93,7 @@ func createMockArgument(
            WasmVMVersions: []config.WasmVMVersionByEpoch{
                {StartEpoch: 0, Version: "*"},
            },
+           TransferAndExecuteByUserAddresses: []string{"3132333435363738393031323334353637383930313233343536373839303234"},
        },
        HardForkConfig: config.HardforkConfig{
            ImportKeysStorageConfig: config.StorageConfig{
@@ -152,6 +154,8 @@ func createMockArgument(
                MaxNumberOfNodesForStake:             10,
                ActivateBLSPubKeyMessageVerification: false,
                MinUnstakeTokensValue:                "1",
+
StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -162,27 +166,33 @@ func createMockArgument( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, TrieStorageManagers: trieStorageManagers, BlockSignKeyGen: &mock.KeyGenMock{}, GenesisNodePrice: nodePrice, - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: config.EnableEpochs{ - BuiltInFunctionsEnableEpoch: 0, - SCDeployEnableEpoch: 0, - RelayedTransactionsEnableEpoch: 0, - PenalizedTooMuchGasEnableEpoch: 0, - }, - }, - RoundConfig: &config.RoundConfig{ - RoundActivations: map[string]config.ActivationRoundByName{ - "DisableAsyncCallV1": { - Round: "18446744073709551615", - }, + SCDeployEnableEpoch: unreachableEpoch, + CleanUpInformativeSCRsEnableEpoch: unreachableEpoch, + SCProcessorV2EnableEpoch: unreachableEpoch, + StakeLimitsEnableEpoch: 10, }, }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, + versionedHeaderFactory: &testscommon.VersionedHeaderFactoryStub{ + CreateCalled: func(epoch uint32) data.HeaderHandler { + return &block.Header{} + }, + }, } arg.ShardCoordinator = &mock.ShardCoordinatorMock{ @@ -299,7 +309,8 @@ func TestNewGenesisBlockCreator(t *testing.T) { arg := createMockArgument(t, "testdata/genesisTest1.json", &mock.InitialNodesHandlerStub{}, big.NewInt(22000)) arg.Core = &mock.CoreComponentsMock{ - AddrPubKeyConv: nil, + AddrPubKeyConv: nil, + EconomicsDataField: &economicsmocks.EconomicsHandlerMock{}, } gbc, err := NewGenesisBlockCreator(arg) @@ -428,16 +439,6 @@ func TestNewGenesisBlockCreator(t *testing.T) { require.True(t, errors.Is(err, genesis.ErrNilTrieStorageManager)) require.Nil(t, gbc) }) - t.Run("nil EpochConfig should error", func(t *testing.T) { - t.Parallel() - - arg := createMockArgument(t, "testdata/genesisTest1.json", &mock.InitialNodesHandlerStub{}, big.NewInt(22000)) - arg.EpochConfig = nil - - gbc, err := NewGenesisBlockCreator(arg) - require.True(t, errors.Is(err, genesis.ErrNilEpochConfig)) - require.Nil(t, gbc) - }) t.Run("invalid GenesisNodePrice should error", func(t *testing.T) { t.Parallel() @@ -898,9 +899,9 @@ func TestCreateArgsGenesisBlockCreator_ShouldWorkAndCreateEmpty(t *testing.T) { blocks, err := gbc.CreateGenesisBlocks() assert.Nil(t, err) assert.Equal(t, 3, len(blocks)) - for _, block := range blocks { - assert.Zero(t, block.GetNonce()) - assert.Zero(t, block.GetRound()) - assert.Zero(t, block.GetEpoch()) + for _, blockInstance := range blocks { + assert.Zero(t, blockInstance.GetNonce()) + assert.Zero(t, blockInstance.GetRound()) + assert.Zero(t, blockInstance.GetEpoch()) } } diff --git a/genesis/process/metaGenesisBlockCreator.go b/genesis/process/metaGenesisBlockCreator.go index 40b5f606241..78546562736 100644 --- a/genesis/process/metaGenesisBlockCreator.go +++ b/genesis/process/metaGenesisBlockCreator.go @@ -19,6 +19,7 @@ import ( disabledCommon "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/common/enablers" "github.com/multiversx/mx-chain-go/common/forking" + 
"github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" @@ -27,6 +28,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/preprocess" "github.com/multiversx/mx-chain-go/process/coordinator" + disabledProcess "github.com/multiversx/mx-chain-go/process/disabled" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/process/factory/metachain" disabledGuardian "github.com/multiversx/mx-chain-go/process/guardian/disabled" @@ -48,9 +50,6 @@ import ( "github.com/multiversx/mx-chain-vm-common-go/parsers" ) -const unreachableEpoch = ^uint32(0) -const unreachableRound = ^uint64(0) - // CreateMetaGenesisBlock will create a metachain genesis block func CreateMetaGenesisBlock( arg ArgsGenesisBlockCreator, @@ -70,7 +69,11 @@ func CreateMetaGenesisBlock( DeployInitialScTxs: make([]data.TransactionHandler, 0), } - processors, err := createProcessorsForMetaGenesisBlock(arg, createGenesisConfig(), createGenesisRoundConfig()) + processors, err := createProcessorsForMetaGenesisBlock( + arg, + createGenesisConfig(arg.EpochConfig.EnableEpochs), + createGenesisRoundConfig(arg.RoundConfig), + ) if err != nil { return nil, nil, nil, err } @@ -189,7 +192,8 @@ func createMetaGenesisBlockAfterHardFork( return nil, nil, nil, process.ErrWrongTypeAssertion } - err = arg.Accounts.RecreateTrie(hdrHandler.GetRootHash()) + rootHashHolder := holders.NewDefaultRootHashesHolder(hdrHandler.GetRootHash()) + err = arg.Accounts.RecreateTrie(rootHashHolder) if err != nil { return nil, nil, nil, err } @@ -295,7 +299,7 @@ func saveGenesisMetaToStorage( return nil } -func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig *config.RoundConfig) (*genesisProcessors, error) { +func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig config.RoundConfig) (*genesisProcessors, error) { epochNotifier := forking.NewGenericEpochNotifier() temporaryMetaHeader := &block.MetaBlock{ Epoch: arg.StartEpochNum, @@ -308,7 +312,7 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc epochNotifier.CheckEpoch(temporaryMetaHeader) roundNotifier := forking.NewGenericRoundNotifier() - enableRoundsHandler, err := enablers.NewEnableRoundsHandler(*roundConfig, roundNotifier) + enableRoundsHandler, err := enablers.NewEnableRoundsHandler(roundConfig, roundNotifier) if err != nil { return nil, err } @@ -360,6 +364,7 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc ChanceComputer: &disabled.Rater{}, ShardCoordinator: arg.ShardCoordinator, EnableEpochsHandler: enableEpochsHandler, + NodesCoordinator: &disabled.NodesCoordinator{}, } virtualMachineFactory, err := metachain.NewVMContainerFactory(argsNewVMContainerFactory) if err != nil { @@ -426,6 +431,11 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc return nil, err } + err = arg.Core.EconomicsData().SetTxTypeHandler(txTypeHandler) + if err != nil { + return nil, err + } + gasHandler, err := preprocess.NewGasComputation(arg.Economics, txTypeHandler, 
enableEpochsHandler) if err != nil { return nil, err @@ -433,28 +443,29 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc argsParser := smartContract.NewArgumentParser() argsNewSCProcessor := scrCommon.ArgsNewSmartContractProcessor{ - VmContainer: vmContainer, - ArgsParser: argsParser, - Hasher: arg.Core.Hasher(), - Marshalizer: arg.Core.InternalMarshalizer(), - AccountsDB: arg.Accounts, - BlockChainHook: virtualMachineFactory.BlockChainHookImpl(), - BuiltInFunctions: builtInFuncs, - PubkeyConv: arg.Core.AddressPubKeyConverter(), - ShardCoordinator: arg.ShardCoordinator, - ScrForwarder: scForwarder, - TxFeeHandler: genesisFeeHandler, - EconomicsFee: genesisFeeHandler, - TxTypeHandler: txTypeHandler, - GasHandler: gasHandler, - GasSchedule: arg.GasSchedule, - TxLogsProcessor: arg.TxLogsProcessor, - BadTxForwarder: badTxForwarder, - EnableRoundsHandler: enableRoundsHandler, - EnableEpochsHandler: enableEpochsHandler, - IsGenesisProcessing: true, - WasmVMChangeLocker: &sync.RWMutex{}, // local Locker as to not interfere with the rest of the components - VMOutputCacher: txcache.NewDisabledCache(), + VmContainer: vmContainer, + ArgsParser: argsParser, + Hasher: arg.Core.Hasher(), + Marshalizer: arg.Core.InternalMarshalizer(), + AccountsDB: arg.Accounts, + BlockChainHook: virtualMachineFactory.BlockChainHookImpl(), + BuiltInFunctions: builtInFuncs, + PubkeyConv: arg.Core.AddressPubKeyConverter(), + ShardCoordinator: arg.ShardCoordinator, + ScrForwarder: scForwarder, + TxFeeHandler: genesisFeeHandler, + EconomicsFee: genesisFeeHandler, + TxTypeHandler: txTypeHandler, + GasHandler: gasHandler, + GasSchedule: arg.GasSchedule, + TxLogsProcessor: arg.TxLogsProcessor, + BadTxForwarder: badTxForwarder, + EnableRoundsHandler: enableRoundsHandler, + EnableEpochsHandler: enableEpochsHandler, + IsGenesisProcessing: true, + WasmVMChangeLocker: &sync.RWMutex{}, // local Locker as to not interfere with the rest of the components + VMOutputCacher: txcache.NewDisabledCache(), + FailedTxLogsAccumulator: disabledProcess.NewFailedTxLogsAccumulator(), } scProcessorProxy, err := processProxy.NewSmartContractProcessorProxy(argsNewSCProcessor, epochNotifier) diff --git a/genesis/process/shardGenesisBlockCreator.go b/genesis/process/shardGenesisBlockCreator.go index ec3cf7f8d6b..b44ed14c207 100644 --- a/genesis/process/shardGenesisBlockCreator.go +++ b/genesis/process/shardGenesisBlockCreator.go @@ -5,7 +5,6 @@ import ( "fmt" "math" "math/big" - "strconv" "sync" "github.com/multiversx/mx-chain-core-go/core/check" @@ -16,6 +15,7 @@ import ( disabledCommon "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/common/enablers" "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" "github.com/multiversx/mx-chain-go/genesis" @@ -24,6 +24,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/preprocess" "github.com/multiversx/mx-chain-go/process/coordinator" + processDisabled "github.com/multiversx/mx-chain-go/process/disabled" "github.com/multiversx/mx-chain-go/process/factory/shard" disabledGuardian 
"github.com/multiversx/mx-chain-go/process/guardian/disabled" "github.com/multiversx/mx-chain-go/process/receipts" @@ -45,8 +46,9 @@ import ( "github.com/multiversx/mx-chain-vm-common-go/parsers" ) -var log = logger.GetOrCreate("genesis/process") +const unreachableEpoch = ^uint32(0) +var log = logger.GetOrCreate("genesis/process") var zero = big.NewInt(0) type deployedScMetrics struct { @@ -54,114 +56,27 @@ type deployedScMetrics struct { numOtherTypes int } -func createGenesisConfig() config.EnableEpochs { - blsMultiSignerEnableEpoch := []config.MultiSignerConfig{ +func createGenesisConfig(providedEnableEpochs config.EnableEpochs) config.EnableEpochs { + clonedConfig := providedEnableEpochs + clonedConfig.BuiltInFunctionsEnableEpoch = 0 + clonedConfig.PenalizedTooMuchGasEnableEpoch = unreachableEpoch + clonedConfig.MaxNodesChangeEnableEpoch = []config.MaxNodesChangeConfig{ { - EnableEpoch: 0, - Type: "no-KOSK", + EpochEnable: unreachableEpoch, + MaxNumNodes: 0, + NodesToShufflePerShard: 0, }, } + clonedConfig.StakeEnableEpoch = unreachableEpoch // we need to specifically disable this, we have exceptions in the staking system SC + clonedConfig.DoubleKeyProtectionEnableEpoch = 0 - return config.EnableEpochs{ - SCDeployEnableEpoch: unreachableEpoch, - BuiltInFunctionsEnableEpoch: 0, - RelayedTransactionsEnableEpoch: unreachableEpoch, - PenalizedTooMuchGasEnableEpoch: unreachableEpoch, - SwitchJailWaitingEnableEpoch: unreachableEpoch, - SwitchHysteresisForMinNodesEnableEpoch: unreachableEpoch, - BelowSignedThresholdEnableEpoch: unreachableEpoch, - TransactionSignedWithTxHashEnableEpoch: unreachableEpoch, - MetaProtectionEnableEpoch: unreachableEpoch, - AheadOfTimeGasUsageEnableEpoch: unreachableEpoch, - GasPriceModifierEnableEpoch: unreachableEpoch, - RepairCallbackEnableEpoch: unreachableEpoch, - MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ - { - EpochEnable: unreachableEpoch, - MaxNumNodes: 0, - NodesToShufflePerShard: 0, - }, - }, - BlockGasAndFeesReCheckEnableEpoch: unreachableEpoch, - StakingV2EnableEpoch: unreachableEpoch, - StakeEnableEpoch: unreachableEpoch, // no need to enable this, we have builtin exceptions in staking system SC - DoubleKeyProtectionEnableEpoch: 0, - ESDTEnableEpoch: unreachableEpoch, - GovernanceEnableEpoch: unreachableEpoch, - GovernanceDisableProposeEnableEpoch: unreachableEpoch, - GovernanceFixesEnableEpoch: unreachableEpoch, - DelegationManagerEnableEpoch: unreachableEpoch, - DelegationSmartContractEnableEpoch: unreachableEpoch, - CorrectLastUnjailedEnableEpoch: unreachableEpoch, - BalanceWaitingListsEnableEpoch: unreachableEpoch, - ReturnDataToLastTransferEnableEpoch: unreachableEpoch, - SenderInOutTransferEnableEpoch: unreachableEpoch, - RelayedTransactionsV2EnableEpoch: unreachableEpoch, - UnbondTokensV2EnableEpoch: unreachableEpoch, - SaveJailedAlwaysEnableEpoch: unreachableEpoch, - ValidatorToDelegationEnableEpoch: unreachableEpoch, - ReDelegateBelowMinCheckEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, - IncrementSCRNonceInMultiTransferEnableEpoch: unreachableEpoch, - ESDTMultiTransferEnableEpoch: unreachableEpoch, - GlobalMintBurnDisableEpoch: unreachableEpoch, - ESDTTransferRoleEnableEpoch: unreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: unreachableEpoch, - ComputeRewardCheckpointEnableEpoch: unreachableEpoch, - SCRSizeInvariantCheckEnableEpoch: unreachableEpoch, - BackwardCompSaveKeyValueEnableEpoch: unreachableEpoch, - 
ESDTNFTCreateOnMultiShardEnableEpoch: unreachableEpoch, - MetaESDTSetEnableEpoch: unreachableEpoch, - AddTokensToDelegationEnableEpoch: unreachableEpoch, - MultiESDTTransferFixOnCallBackOnEnableEpoch: unreachableEpoch, - OptimizeGasUsedInCrossMiniBlocksEnableEpoch: unreachableEpoch, - CorrectFirstQueuedEpoch: unreachableEpoch, - CorrectJailedNotUnstakedEmptyQueueEpoch: unreachableEpoch, - FixOOGReturnCodeEnableEpoch: unreachableEpoch, - RemoveNonUpdatedStorageEnableEpoch: unreachableEpoch, - DeleteDelegatorAfterClaimRewardsEnableEpoch: unreachableEpoch, - OptimizeNFTStoreEnableEpoch: unreachableEpoch, - CreateNFTThroughExecByCallerEnableEpoch: unreachableEpoch, - StopDecreasingValidatorRatingWhenStuckEnableEpoch: unreachableEpoch, - FrontRunningProtectionEnableEpoch: unreachableEpoch, - IsPayableBySCEnableEpoch: unreachableEpoch, - CleanUpInformativeSCRsEnableEpoch: unreachableEpoch, - StorageAPICostOptimizationEnableEpoch: unreachableEpoch, - TransformToMultiShardCreateEnableEpoch: unreachableEpoch, - ESDTRegisterAndSetAllRolesEnableEpoch: unreachableEpoch, - ScheduledMiniBlocksEnableEpoch: unreachableEpoch, - FailExecutionOnEveryAPIErrorEnableEpoch: unreachableEpoch, - AddFailedRelayedTxToInvalidMBsDisableEpoch: unreachableEpoch, - SCRSizeInvariantOnBuiltInResultEnableEpoch: unreachableEpoch, - ManagedCryptoAPIsEnableEpoch: unreachableEpoch, - CheckCorrectTokenIDForTransferRoleEnableEpoch: unreachableEpoch, - DisableExecByCallerEnableEpoch: unreachableEpoch, - RefactorContextEnableEpoch: unreachableEpoch, - CheckFunctionArgumentEnableEpoch: unreachableEpoch, - CheckExecuteOnReadOnlyEnableEpoch: unreachableEpoch, - MiniBlockPartialExecutionEnableEpoch: unreachableEpoch, - ESDTMetadataContinuousCleanupEnableEpoch: unreachableEpoch, - FixAsyncCallBackArgsListEnableEpoch: unreachableEpoch, - FixOldTokenLiquidityEnableEpoch: unreachableEpoch, - SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, - RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, - SCProcessorV2EnableEpoch: unreachableEpoch, - DoNotReturnOldBlockInBlockchainHookEnableEpoch: unreachableEpoch, - MaxBlockchainHookCountersEnableEpoch: unreachableEpoch, - BLSMultiSignerEnableEpoch: blsMultiSignerEnableEpoch, - SetGuardianEnableEpoch: unreachableEpoch, - ScToScLogEventEnableEpoch: unreachableEpoch, - } + return clonedConfig } -func createGenesisRoundConfig() *config.RoundConfig { - return &config.RoundConfig{ - RoundActivations: map[string]config.ActivationRoundByName{ - "DisableAsyncCallV1": { - Round: strconv.FormatUint(unreachableRound, 10), - }, - }, - } +func createGenesisRoundConfig(providedEnableRounds config.RoundConfig) config.RoundConfig { + clonedConfig := providedEnableRounds + + return clonedConfig } // CreateShardGenesisBlock will create a shard genesis block @@ -183,7 +98,11 @@ func CreateShardGenesisBlock( DeployInitialScTxs: make([]data.TransactionHandler, 0), } - processors, err := createProcessorsForShardGenesisBlock(arg, createGenesisConfig(), createGenesisRoundConfig()) + processors, err := createProcessorsForShardGenesisBlock( + arg, + createGenesisConfig(arg.EpochConfig.EnableEpochs), + createGenesisRoundConfig(arg.RoundConfig), + ) if err != nil { return nil, nil, nil, err } @@ -243,22 +162,10 @@ func CreateShardGenesisBlock( ) round, nonce, epoch := getGenesisBlocksRoundNonceEpoch(arg) - header := &block.Header{ - Epoch: epoch, - Round: round, - Nonce: nonce, - ShardID: arg.ShardCoordinator.SelfId(), - BlockBodyType: block.StateBlock, - PubKeysBitmap: []byte{1}, - Signature: rootHash, - RootHash: 
rootHash, - PrevRandSeed: rootHash, - RandSeed: rootHash, - TimeStamp: arg.GenesisTime, - AccumulatedFees: big.NewInt(0), - DeveloperFees: big.NewInt(0), - ChainID: []byte(arg.Core.ChainID()), - SoftwareVersion: []byte(""), + headerHandler := arg.versionedHeaderFactory.Create(epoch) + err = setInitialDataInHeader(headerHandler, arg, epoch, nonce, round, rootHash) + if err != nil { + return nil, nil, nil, err } err = processors.vmContainer.Close() @@ -271,7 +178,46 @@ func CreateShardGenesisBlock( return nil, nil, nil, err } - return header, scAddresses, indexingData, nil + return headerHandler, scAddresses, indexingData, nil +} + +func setInitialDataInHeader( + headerHandler data.HeaderHandler, + arg ArgsGenesisBlockCreator, + epoch uint32, + nonce uint64, + round uint64, + rootHash []byte, +) error { + shardHeaderHandler, ok := headerHandler.(data.ShardHeaderHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + + setErrors := make([]error, 0) + setErrors = append(setErrors, shardHeaderHandler.SetEpoch(epoch)) + setErrors = append(setErrors, shardHeaderHandler.SetNonce(nonce)) + setErrors = append(setErrors, shardHeaderHandler.SetRound(round)) + setErrors = append(setErrors, shardHeaderHandler.SetShardID(arg.ShardCoordinator.SelfId())) + setErrors = append(setErrors, shardHeaderHandler.SetBlockBodyTypeInt32(int32(block.StateBlock))) + setErrors = append(setErrors, shardHeaderHandler.SetPubKeysBitmap([]byte{1})) + setErrors = append(setErrors, shardHeaderHandler.SetSignature(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetRootHash(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetPrevRandSeed(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetRandSeed(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetTimeStamp(arg.GenesisTime)) + setErrors = append(setErrors, shardHeaderHandler.SetAccumulatedFees(big.NewInt(0))) + setErrors = append(setErrors, shardHeaderHandler.SetDeveloperFees(big.NewInt(0))) + setErrors = append(setErrors, shardHeaderHandler.SetChainID([]byte(arg.Core.ChainID()))) + setErrors = append(setErrors, shardHeaderHandler.SetSoftwareVersion([]byte(""))) + + for _, err := range setErrors { + if err != nil { + return err + } + } + + return nil } func createShardGenesisBlockAfterHardFork( @@ -299,7 +245,8 @@ func createShardGenesisBlockAfterHardFork( return nil, nil, nil, err } - err = arg.Accounts.RecreateTrie(hdrHandler.GetRootHash()) + rootHashHolder := holders.NewDefaultRootHashesHolder(hdrHandler.GetRootHash()) + err = arg.Accounts.RecreateTrie(rootHashHolder) if err != nil { return nil, nil, nil, err } @@ -401,7 +348,7 @@ func setBalanceToTrie(arg ArgsGenesisBlockCreator, accnt genesis.InitialAccountH return arg.Accounts.SaveAccount(account) } -func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig *config.RoundConfig) (*genesisProcessors, error) { +func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig config.RoundConfig) (*genesisProcessors, error) { genesisWasmVMLocker := &sync.RWMutex{} // use a local instance as to not run in concurrent issues when doing bootstrap epochNotifier := forking.NewGenericEpochNotifier() enableEpochsHandler, err := enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifier) @@ -410,7 +357,7 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo } roundNotifier := forking.NewGenericRoundNotifier() - 
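Note (illustrative sketch, not part of the patch): the new createGenesisConfig and setInitialDataInHeader above rely on two plain Go idioms: copying a struct by value before overriding a handful of fields, and collecting setter errors and returning the first non-nil one. The standalone snippet below demonstrates both idioms with made-up names (enableEpochsLike, cloneAndOverride, firstError); it is not the repository's real code.

package main

import (
	"errors"
	"fmt"
)

// enableEpochsLike stands in for config.EnableEpochs; assigning a struct copies it,
// so overriding fields on the copy leaves the caller's value untouched.
type enableEpochsLike struct {
	StakeEnableEpoch uint32
}

// cloneAndOverride mirrors the clone-then-override shape of createGenesisConfig.
func cloneAndOverride(provided enableEpochsLike) enableEpochsLike {
	cloned := provided
	cloned.StakeEnableEpoch = ^uint32(0) // an "unreachable" epoch
	return cloned
}

// firstError returns the first non-nil error from a list of setter results,
// mirroring the loop at the end of setInitialDataInHeader.
func firstError(errs ...error) error {
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	original := enableEpochsLike{StakeEnableEpoch: 10}
	fmt.Println(cloneAndOverride(original).StakeEnableEpoch, original.StakeEnableEpoch) // prints 4294967295 10
	fmt.Println(firstError(nil, errors.New("SetRootHash failed"), nil))                 // prints SetRootHash failed
}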
enableRoundsHandler, err := enablers.NewEnableRoundsHandler(*roundConfig, roundNotifier) + enableRoundsHandler, err := enablers.NewEnableRoundsHandler(roundConfig, roundNotifier) if err != nil { return nil, err } @@ -475,6 +422,7 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo ESDTTransferParser: esdtTransferParser, BuiltInFunctions: argsHook.BuiltInFunctions, Hasher: arg.Core.Hasher(), + PubKeyConverter: arg.Core.AddressPubKeyConverter(), } vmFactoryImpl, err := shard.NewVMContainerFactory(argsNewVMFactory) if err != nil { @@ -553,34 +501,40 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo return nil, err } + err = arg.Core.EconomicsData().SetTxTypeHandler(txTypeHandler) + if err != nil { + return nil, err + } + gasHandler, err := preprocess.NewGasComputation(arg.Economics, txTypeHandler, enableEpochsHandler) if err != nil { return nil, err } argsNewScProcessor := scrCommon.ArgsNewSmartContractProcessor{ - VmContainer: vmContainer, - ArgsParser: smartContract.NewArgumentParser(), - Hasher: arg.Core.Hasher(), - Marshalizer: arg.Core.InternalMarshalizer(), - AccountsDB: arg.Accounts, - BlockChainHook: vmFactoryImpl.BlockChainHookImpl(), - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - PubkeyConv: arg.Core.AddressPubKeyConverter(), - ShardCoordinator: arg.ShardCoordinator, - ScrForwarder: scForwarder, - TxFeeHandler: genesisFeeHandler, - EconomicsFee: genesisFeeHandler, - TxTypeHandler: txTypeHandler, - GasHandler: gasHandler, - GasSchedule: arg.GasSchedule, - TxLogsProcessor: arg.TxLogsProcessor, - BadTxForwarder: badTxInterim, - EnableRoundsHandler: enableRoundsHandler, - EnableEpochsHandler: enableEpochsHandler, - IsGenesisProcessing: true, - VMOutputCacher: txcache.NewDisabledCache(), - WasmVMChangeLocker: genesisWasmVMLocker, + VmContainer: vmContainer, + ArgsParser: smartContract.NewArgumentParser(), + Hasher: arg.Core.Hasher(), + Marshalizer: arg.Core.InternalMarshalizer(), + AccountsDB: arg.Accounts, + BlockChainHook: vmFactoryImpl.BlockChainHookImpl(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + PubkeyConv: arg.Core.AddressPubKeyConverter(), + ShardCoordinator: arg.ShardCoordinator, + ScrForwarder: scForwarder, + TxFeeHandler: genesisFeeHandler, + EconomicsFee: genesisFeeHandler, + TxTypeHandler: txTypeHandler, + GasHandler: gasHandler, + GasSchedule: arg.GasSchedule, + TxLogsProcessor: arg.TxLogsProcessor, + BadTxForwarder: badTxInterim, + EnableRoundsHandler: enableRoundsHandler, + EnableEpochsHandler: enableEpochsHandler, + IsGenesisProcessing: true, + VMOutputCacher: txcache.NewDisabledCache(), + WasmVMChangeLocker: genesisWasmVMLocker, + FailedTxLogsAccumulator: processDisabled.NewFailedTxLogsAccumulator(), } scProcessorProxy, err := processProxy.NewSmartContractProcessorProxy(argsNewScProcessor, epochNotifier) @@ -598,25 +552,27 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo } argsNewTxProcessor := transaction.ArgsNewTxProcessor{ - Accounts: arg.Accounts, - Hasher: arg.Core.Hasher(), - PubkeyConv: arg.Core.AddressPubKeyConverter(), - Marshalizer: arg.Core.InternalMarshalizer(), - SignMarshalizer: arg.Core.TxMarshalizer(), - ShardCoordinator: arg.ShardCoordinator, - ScProcessor: scProcessorProxy, - TxFeeHandler: genesisFeeHandler, - TxTypeHandler: txTypeHandler, - EconomicsFee: genesisFeeHandler, - ReceiptForwarder: receiptTxInterim, - BadTxForwarder: badTxInterim, - ArgsParser: smartContract.NewArgumentParser(), - 
ScrForwarder: scForwarder, - EnableRoundsHandler: enableRoundsHandler, - EnableEpochsHandler: enableEpochsHandler, - TxVersionChecker: arg.Core.TxVersionChecker(), - GuardianChecker: disabledGuardian.NewDisabledGuardedAccountHandler(), - TxLogsProcessor: arg.TxLogsProcessor, + Accounts: arg.Accounts, + Hasher: arg.Core.Hasher(), + PubkeyConv: arg.Core.AddressPubKeyConverter(), + Marshalizer: arg.Core.InternalMarshalizer(), + SignMarshalizer: arg.Core.TxMarshalizer(), + ShardCoordinator: arg.ShardCoordinator, + ScProcessor: scProcessorProxy, + TxFeeHandler: genesisFeeHandler, + TxTypeHandler: txTypeHandler, + EconomicsFee: genesisFeeHandler, + ReceiptForwarder: receiptTxInterim, + BadTxForwarder: badTxInterim, + ArgsParser: smartContract.NewArgumentParser(), + ScrForwarder: scForwarder, + EnableRoundsHandler: enableRoundsHandler, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: arg.Core.TxVersionChecker(), + GuardianChecker: disabledGuardian.NewDisabledGuardedAccountHandler(), + TxLogsProcessor: arg.TxLogsProcessor, + RelayedTxV3Processor: processDisabled.NewRelayedTxV3Processor(), + FailedTxLogsAccumulator: processDisabled.NewFailedTxLogsAccumulator(), } transactionProcessor, err := transaction.NewTxProcessor(argsNewTxProcessor) if err != nil { diff --git a/go.mod b/go.mod index 9f27d2e1ffd..4667bd06e7e 100644 --- a/go.mod +++ b/go.mod @@ -12,19 +12,20 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 + github.com/klauspost/cpuid/v2 v2.2.5 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381 - github.com/multiversx/mx-chain-core-go v1.2.19-0.20231214115026-a1e7279b14f1 - github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b - github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058 - github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040 - github.com/multiversx/mx-chain-scenario-go v1.2.2-0.20231129113427-ad3056f45296 - github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4 - github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20231228070003-ae14e1e0adfa - github.com/multiversx/mx-chain-vm-go v1.5.23-0.20231228064104-964359cb8dd3 - github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216 - github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14 - github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955 + github.com/multiversx/mx-chain-communication-go v1.1.0 + github.com/multiversx/mx-chain-core-go v1.2.21 + github.com/multiversx/mx-chain-crypto-go v1.2.12 + github.com/multiversx/mx-chain-es-indexer-go v1.7.4 + github.com/multiversx/mx-chain-logger-go v1.0.15 + github.com/multiversx/mx-chain-scenario-go v1.4.4 + github.com/multiversx/mx-chain-storage-go v1.0.16 + github.com/multiversx/mx-chain-vm-common-go v1.5.13 + github.com/multiversx/mx-chain-vm-go v1.5.32-0.20240808073353-f1fbbf147537 + 
github.com/multiversx/mx-chain-vm-v1_2-go v1.2.68 + github.com/multiversx/mx-chain-vm-v1_3-go v1.3.69 + github.com/multiversx/mx-chain-vm-v1_4-go v1.4.98 github.com/pelletier/go-toml v1.9.3 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 @@ -48,7 +49,7 @@ require ( github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/denisbrodbeck/machineid v1.0.1 // indirect @@ -92,7 +93,6 @@ require ( github.com/jbenet/goprocess v0.1.4 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.16.5 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/leodido/go-urn v1.2.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect @@ -150,8 +150,7 @@ require ( github.com/quic-go/quic-go v0.33.0 // indirect github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/russross/blackfriday/v2 v2.0.1 // indirect - github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/smartystreets/assertions v1.13.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect diff --git a/go.sum b/go.sum index 0375c025713..11a9bc62556 100644 --- a/go.sum +++ b/go.sum @@ -72,8 +72,9 @@ github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -384,30 +385,30 @@ 
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381 h1:M4JNeubA+zq7NaH2LP5YsWUVeKn9hNL+HgSw2kqwWUc= -github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381/go.mod h1:n4E8BWIV0g3AcNGe1gf+vcjUC8A2QCJ4ARQSbiUDGrI= -github.com/multiversx/mx-chain-core-go v1.2.19-0.20231214115026-a1e7279b14f1 h1:8rz1ZpRAsWVxSEBy7PJIUStQMKiHs3I4mvpRmHUpsbI= -github.com/multiversx/mx-chain-core-go v1.2.19-0.20231214115026-a1e7279b14f1/go.mod h1:B5zU4MFyJezmEzCsAHE9YNULmGCm2zbPHvl9hazNxmE= -github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b h1:TIE6it719ZIW0E1bFgPAgE+U3zPSkPfAloFYEIeOL3U= -github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b/go.mod h1:Ap6p7QZFtwPlb++OvCG+85BfuZ+bLP/JtQp6EwjWJsI= -github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058 h1:6XH7ua4vUqhbE4NMzs8K63b7A/9KMO4H8XZfYjyy778= -github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058/go.mod h1:9BzrDTbIjruFXN6YcDOBsnOP0cUHhQobRUlmNOwkDME= -github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040 h1:rsEflKFn5StRh0ADxElUkI/9wZV0Lbig+b0671LmjTk= -github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040/go.mod h1:fH/fR/GEBsDjPkBoZDVJMoYo2HhlA7++DP6QfITJ1N8= -github.com/multiversx/mx-chain-scenario-go v1.2.2-0.20231129113427-ad3056f45296 h1:jDGGEubkiTJfEFcbErUYCYM2Z6wKapgZyGaICScpynk= -github.com/multiversx/mx-chain-scenario-go v1.2.2-0.20231129113427-ad3056f45296/go.mod h1:WocyahfHCC3oGILEVdRe7I4/+q/TLCORoTo1X4wGmF4= -github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4 h1:2RJ6T31pLN75l4xfhTicGZ+gVOPMxSGPip+O1XYVYac= -github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4/go.mod h1:ioCT2oHQ+TyHQYpgjxzlUdy7dCdv56+w5HnBg9z96eY= -github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20231228070003-ae14e1e0adfa h1:xdDeUC4yOfiUwctkYioYMjjigBZoZo5RZq1e5WoCVRs= -github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20231228070003-ae14e1e0adfa/go.mod h1:7jjGRykSfLeMs6iQdszlE0lGK2xp9/cctiVdeKbQLLM= -github.com/multiversx/mx-chain-vm-go v1.5.23-0.20231228064104-964359cb8dd3 h1:qfzeTPI2oSgxnw52KiVWc2fHMem6FZIkX1Azwy64098= -github.com/multiversx/mx-chain-vm-go v1.5.23-0.20231228064104-964359cb8dd3/go.mod h1:4kcpwq70UB3Clnc6Q0krGA8hgQ26JTQpmCP+4y5aiV0= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216 h1:CDSn4hgiGwoOSSLmajgOvjdoRxfJSXjEu/CfXiqihwo= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216/go.mod h1:h87SKR/p66XP0Er2Mx2KfjzS6mLmW6l3tDWyO1oNr94= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14 
h1:7r2zQiAfqGjN7U8j5obXIoRSh+vnoupBhxBgQGUA2ck= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14/go.mod h1:MnpQOi/P4K744ZJl8pQksulsHazmN6YRzJ4amgtZ0OQ= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955 h1:5b0+UeSbcyh+9z9x/6Nql3cYwaNWzTwj+KIfH4YaASs= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955/go.mod h1:+DLltGV0h3/H9bJaz01JyeapKNki3Rh4o5VGpjd2ZNc= +github.com/multiversx/mx-chain-communication-go v1.1.0 h1:J7bX6HoN3HiHY7cUeEjG8AJWgQDDPcY+OPDOsSUOkRE= +github.com/multiversx/mx-chain-communication-go v1.1.0/go.mod h1:WK6bP4pGEHGDDna/AYRIMtl6G9OA0NByI1Lw8PmOnRM= +github.com/multiversx/mx-chain-core-go v1.2.21 h1:+XVKznPTlUU5EFS1A8chtS8fStW60upRIyF4Pgml19I= +github.com/multiversx/mx-chain-core-go v1.2.21/go.mod h1:B5zU4MFyJezmEzCsAHE9YNULmGCm2zbPHvl9hazNxmE= +github.com/multiversx/mx-chain-crypto-go v1.2.12 h1:zWip7rpUS4CGthJxfKn5MZfMfYPjVjIiCID6uX5BSOk= +github.com/multiversx/mx-chain-crypto-go v1.2.12/go.mod h1:HzcPpCm1zanNct/6h2rIh+MFrlXbjA5C8+uMyXj3LI4= +github.com/multiversx/mx-chain-es-indexer-go v1.7.4 h1:SjJk9G9SN8baz0sFIU2jymYCfx3XiikGEB2wW0jwvfw= +github.com/multiversx/mx-chain-es-indexer-go v1.7.4/go.mod h1:oGcRK2E3Syv6vRTszWrrb/TqD8akq0yeoMr1wPPiTO4= +github.com/multiversx/mx-chain-logger-go v1.0.15 h1:HlNdK8etyJyL9NQ+6mIXyKPEBo+wRqOwi3n+m2QIHXc= +github.com/multiversx/mx-chain-logger-go v1.0.15/go.mod h1:t3PRKaWB1M+i6gUfD27KXgzLJJC+mAQiN+FLlL1yoGQ= +github.com/multiversx/mx-chain-scenario-go v1.4.4 h1:DVE2V+FPeyD/yWoC+KEfPK3jsFzHeruelESfpTlf460= +github.com/multiversx/mx-chain-scenario-go v1.4.4/go.mod h1:kI+TWR3oIEgUkbwkHCPo2CQ3VjIge+ezGTibiSGwMxo= +github.com/multiversx/mx-chain-storage-go v1.0.16 h1:l2lJq+EAN3YwLbjJrnoKfFd1/1Xmo9DcAUECND2obLs= +github.com/multiversx/mx-chain-storage-go v1.0.16/go.mod h1:uM/z7YyqTOD3wgyH8TfapyEl5sb+7x/Jaxne4cfG4HI= +github.com/multiversx/mx-chain-vm-common-go v1.5.13 h1:ymnIHJW4Z4mFa0hZzla4fozkF30vjH5O1q+Y7Ftc+pQ= +github.com/multiversx/mx-chain-vm-common-go v1.5.13/go.mod h1:OSvFbzdWThfRbLZbUsEr7bikBSaLrPJQ2iUm9jw9nXQ= +github.com/multiversx/mx-chain-vm-go v1.5.32-0.20240808073353-f1fbbf147537 h1:x1Fn0tlkicBNsRB/co/c9TTjyvCrzmE/rVXA8uUWhII= +github.com/multiversx/mx-chain-vm-go v1.5.32-0.20240808073353-f1fbbf147537/go.mod h1:iq6sCPweoHC9Fx56uf8buPrqlGVGJKUMRFxTunzjvys= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.68 h1:L3GoAVFtLLzr9ya0rVv1YdTUzS3MyM7kQNBSAjCNO2g= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.68/go.mod h1:ixxwib+1pXwSDHG5Wa34v0SRScF+BwFzH4wFWY31saI= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.69 h1:G/PLsyfQV4bMLs2amGRvaLKZoW1DC7M+7ecVaLuaCNc= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.69/go.mod h1:msY3zaS+K+R10ypqQs/jke6xdNAJzS38PGIaeJj2zhg= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.98 h1:/fYx4ClVPU48pTKh2qk4QVlve0xjjDpvzOakjFUtXJ8= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.98/go.mod h1:4vqG8bSmufZx263DMrmr8OLbO6q6//VPC4W9PxZLB5Q= github.com/multiversx/mx-components-big-int v1.0.0 h1:Wkr8lSzK2nDqixOrrBa47VNuqdhV1m/aJhaP1EMaiS8= 
github.com/multiversx/mx-components-big-int v1.0.0/go.mod h1:maIEMgHlNE2u78JaDD0oLzri+ShgU4okHfzP3LWGdQM= github.com/multiversx/protobuf v1.3.2 h1:RaNkxvGTGbA0lMcnHAN24qE1G1i+Xs5yHA6MDvQ4mSM= @@ -486,8 +487,9 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -512,7 +514,6 @@ github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= diff --git a/heartbeat/interface.go b/heartbeat/interface.go index 12eb29a5d61..3652170d8ba 100644 --- a/heartbeat/interface.go +++ b/heartbeat/interface.go @@ -83,6 +83,7 @@ type ManagedPeersHolder interface { IncrementRoundsWithoutReceivedMessages(pkBytes []byte) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) GetManagedKeysByCurrentNode() map[string]crypto.PrivateKey + GetLoadedKeysByCurrentNode() [][]byte IsKeyManagedByCurrentNode(pkBytes []byte) bool IsKeyRegistered(pkBytes []byte) bool IsPidManagedByCurrentNode(pid core.PeerID) bool diff --git a/heartbeat/monitor/crossShardPeerTopicNotifier.go b/heartbeat/monitor/crossShardPeerTopicNotifier.go deleted file mode 100644 index aa25995fc71..00000000000 --- a/heartbeat/monitor/crossShardPeerTopicNotifier.go +++ /dev/null @@ -1,111 +0,0 @@ -package monitor - -import ( - "fmt" - "strconv" - "strings" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - 
"github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/heartbeat" - "github.com/multiversx/mx-chain-go/sharding" -) - -const topicSeparator = "_" - -// ArgsCrossShardPeerTopicNotifier represents the arguments for the cross shard peer topic notifier -type ArgsCrossShardPeerTopicNotifier struct { - ShardCoordinator sharding.Coordinator - PeerShardMapper heartbeat.PeerShardMapper -} - -type crossShardPeerTopicNotifier struct { - shardCoordinator sharding.Coordinator - peerShardMapper heartbeat.PeerShardMapper -} - -// NewCrossShardPeerTopicNotifier create a new cross shard peer topic notifier instance -func NewCrossShardPeerTopicNotifier(args ArgsCrossShardPeerTopicNotifier) (*crossShardPeerTopicNotifier, error) { - err := checkArgsCrossShardPeerTopicNotifier(args) - if err != nil { - return nil, err - } - - notifier := &crossShardPeerTopicNotifier{ - shardCoordinator: args.ShardCoordinator, - peerShardMapper: args.PeerShardMapper, - } - - return notifier, nil -} - -func checkArgsCrossShardPeerTopicNotifier(args ArgsCrossShardPeerTopicNotifier) error { - if check.IfNil(args.PeerShardMapper) { - return heartbeat.ErrNilPeerShardMapper - } - if check.IfNil(args.ShardCoordinator) { - return heartbeat.ErrNilShardCoordinator - } - - return nil -} - -// NewPeerFound is called whenever a new peer was found -func (notifier *crossShardPeerTopicNotifier) NewPeerFound(pid core.PeerID, topic string) { - splt := strings.Split(topic, topicSeparator) - if len(splt) != 3 { - // not a cross shard peer or the topic is global - return - } - - shardID1, err := notifier.getShardID(splt[1]) - if err != nil { - log.Error("failed to extract first shard for topic", "topic", topic, "error", err.Error()) - return - } - - shardID2, err := notifier.getShardID(splt[2]) - if err != nil { - log.Error("failed to extract second shard for topic", "topic", topic, "error", err.Error()) - return - } - if shardID1 == shardID2 { - return - } - notifier.checkAndAddShardID(pid, shardID1, topic, shardID2) - notifier.checkAndAddShardID(pid, shardID2, topic, shardID1) -} - -// TODO make a standalone component out of this -func (notifier *crossShardPeerTopicNotifier) getShardID(data string) (uint32, error) { - if data == common.MetachainTopicIdentifier { - return common.MetachainShardId, nil - } - val, err := strconv.Atoi(data) - if err != nil { - return 0, err - } - if uint32(val) >= notifier.shardCoordinator.NumberOfShards() || val < 0 { - return 0, fmt.Errorf("invalid value in crossShardPeerTopicNotifier.getShardID %d", val) - } - - return uint32(val), nil -} - -func (notifier *crossShardPeerTopicNotifier) checkAndAddShardID(pid core.PeerID, shardID1 uint32, topic string, shardID2 uint32) { - if shardID1 != notifier.shardCoordinator.SelfId() { - return - } - - log.Trace("crossShardPeerTopicNotifier.NewPeerFound found a cross shard peer", - "topic", topic, - "pid", pid.Pretty(), - "shard", shardID2) - notifier.peerShardMapper.PutPeerIdShardId(pid, shardID2) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (notifier *crossShardPeerTopicNotifier) IsInterfaceNil() bool { - return notifier == nil -} diff --git a/heartbeat/monitor/crossShardPeerTopicNotifier_test.go b/heartbeat/monitor/crossShardPeerTopicNotifier_test.go deleted file mode 100644 index e4951586852..00000000000 --- a/heartbeat/monitor/crossShardPeerTopicNotifier_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package monitor - -import ( - "math" - "testing" - - 
"github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/heartbeat" - "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/testscommon" - "github.com/stretchr/testify/assert" -) - -func createMockArgsCrossShardPeerTopicNotifier() ArgsCrossShardPeerTopicNotifier { - return ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: &testscommon.ShardsCoordinatorMock{ - NoShards: 3, - CurrentShard: 1, - }, - PeerShardMapper: &mock.PeerShardMapperStub{}, - } -} - -func TestNewCrossShardPeerTopicNotifier(t *testing.T) { - t.Parallel() - - t.Run("nil sharding coordinator should error", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.ShardCoordinator = nil - - notifier, err := NewCrossShardPeerTopicNotifier(args) - assert.True(t, check.IfNil(notifier)) - assert.Equal(t, heartbeat.ErrNilShardCoordinator, err) - }) - t.Run("nil peer shard mapper should error", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = nil - - notifier, err := NewCrossShardPeerTopicNotifier(args) - assert.True(t, check.IfNil(notifier)) - assert.Equal(t, heartbeat.ErrNilPeerShardMapper, err) - }) - t.Run("should work", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - - notifier, err := NewCrossShardPeerTopicNotifier(args) - assert.False(t, check.IfNil(notifier)) - assert.Nil(t, err) - }) -} - -func TestCrossShardPeerTopicNotifier_NewPeerFound(t *testing.T) { - t.Parallel() - - testTopic := "test" - t.Run("global topic should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - notifier.NewPeerFound("pid", "random topic") - }) - t.Run("intra-shard topic should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(0, 0) - notifier.NewPeerFound("pid", topic) - }) - t.Run("cross-shard topic but not relevant to current node should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(0, 2) - notifier.NewPeerFound("pid", topic) - }) - t.Run("first shard ID is a NaN should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, 
"should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_NaN_1" - notifier.NewPeerFound("pid", topic) - }) - t.Run("second shard ID is a NaN should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_1_NaN" - notifier.NewPeerFound("pid", topic) - }) - t.Run("second shard ID is a negative value should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_1_-1" - notifier.NewPeerFound("pid", topic) - }) - t.Run("second shard ID is an out of range value should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_1_4" - notifier.NewPeerFound("pid", topic) - }) - t.Run("same shard IDs should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_0_0" - notifier.NewPeerFound("pid", topic) - }) - t.Run("cross-shard between 0 and 1 should notice", func(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("pid") - notifiedShardID := uint32(math.MaxUint32) - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Equal(t, pid, expectedPid) - notifiedShardID = shardId - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(0, 1) - notifier.NewPeerFound("pid", topic) - assert.Equal(t, uint32(0), notifiedShardID) - }) - t.Run("cross-shard between 1 and 2 should notice", func(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("pid") - notifiedShardID := uint32(math.MaxUint32) - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Equal(t, pid, expectedPid) - notifiedShardID = shardId - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(1, 2) - notifier.NewPeerFound("pid", topic) - assert.Equal(t, uint32(2), notifiedShardID) - }) - t.Run("cross-shard between 1 and META should notice", func(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("pid") - notifiedShardID := uint32(math.MaxUint32) - args := createMockArgsCrossShardPeerTopicNotifier() - 
args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Equal(t, pid, expectedPid) - notifiedShardID = shardId - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(1, common.MetachainShardId) - notifier.NewPeerFound("pid", topic) - assert.Equal(t, common.MetachainShardId, notifiedShardID) - }) - t.Run("cross-shard between META and 1 should notice", func(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("pid") - notifiedShardID := uint32(math.MaxUint32) - args := createMockArgsCrossShardPeerTopicNotifier() - args.ShardCoordinator = &testscommon.ShardsCoordinatorMock{ - NoShards: 3, - CurrentShard: common.MetachainShardId, - } - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Equal(t, pid, expectedPid) - notifiedShardID = shardId - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(common.MetachainShardId, 1) - notifier.NewPeerFound("pid", topic) - assert.Equal(t, uint32(1), notifiedShardID) - }) -} - -func BenchmarkCrossShardPeerTopicNotifier_NewPeerFound(b *testing.B) { - args := createMockArgsCrossShardPeerTopicNotifier() - notifier, _ := NewCrossShardPeerTopicNotifier(args) - - for i := 0; i < b.N; i++ { - switch i % 6 { - case 0: - notifier.NewPeerFound("pid", "global") - case 2: - notifier.NewPeerFound("pid", "intrashard_1") - case 3: - notifier.NewPeerFound("pid", "crossshard_1_2") - case 4: - notifier.NewPeerFound("pid", "crossshard_1_META") - case 5: - notifier.NewPeerFound("pid", "crossshard_META_1") - case 6: - notifier.NewPeerFound("pid", "crossshard_2_META") - } - } -} diff --git a/integrationTests/api/transaction_test.go b/integrationTests/api/transaction_test.go index c4267676343..2ecb27b850c 100644 --- a/integrationTests/api/transaction_test.go +++ b/integrationTests/api/transaction_test.go @@ -14,6 +14,10 @@ import ( ) func TestTransactionGroup(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + node := integrationTests.NewTestProcessorNodeWithTestWebServer(3, 0, 0) testTransactionGasCostWithMissingFields(t, node) diff --git a/integrationTests/benchmarks/loadFromTrie_test.go b/integrationTests/benchmarks/loadFromTrie_test.go index c3c7a99f573..866368e7cf4 100644 --- a/integrationTests/benchmarks/loadFromTrie_test.go +++ b/integrationTests/benchmarks/loadFromTrie_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" @@ -32,6 +33,10 @@ func TestTrieLoadTime(t *testing.T) { } func TestTrieLoadTimeForOneLevel(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + numTrieLevels := 1 numTries := 10000 numChildrenPerBranch := 8 @@ -63,7 +68,7 @@ func testTrieLoadTime(t *testing.T, numChildrenPerBranch int, numTries int, maxT func timeTrieRecreate(tries []*keyForTrie, depth int) { startTime := time.Now() for j := range tries { - _, _ = 
tries[j].tr.Recreate(tries[j].key) + _, _ = tries[j].tr.Recreate(holders.NewDefaultRootHashesHolder(tries[j].key)) } duration := time.Since(startTime) fmt.Printf("trie with depth %d, duration %d \n", depth, duration.Nanoseconds()/int64(len(tries))) @@ -100,7 +105,7 @@ func generateTriesWithMaxDepth( key := insertKeysIntoTrie(t, tr, numTrieLevels, numChildrenPerBranch) rootHash, _ := tr.RootHash() - collapsedTrie, _ := tr.Recreate(rootHash) + collapsedTrie, _ := tr.Recreate(holders.NewDefaultRootHashesHolder(rootHash)) if numTrieLevels == 1 { key = rootHash diff --git a/integrationTests/chainSimulator/common.go b/integrationTests/chainSimulator/common.go new file mode 100644 index 00000000000..0e29c33e617 --- /dev/null +++ b/integrationTests/chainSimulator/common.go @@ -0,0 +1,45 @@ +package chainSimulator + +import ( + "math/big" + + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + + "github.com/multiversx/mx-chain-core-go/data/transaction" +) + +const ( + minGasPrice = 1000000000 + txVersion = 1 + mockTxSignature = "sig" + + // OkReturnCode the const for the ok return code + OkReturnCode = "ok" +) + +var ( + // ZeroValue the variable for the zero big int + ZeroValue = big.NewInt(0) + // OneEGLD the variable for one egld value + OneEGLD = big.NewInt(1000000000000000000) + // MinimumStakeValue the variable for the minimum stake value + MinimumStakeValue = big.NewInt(0).Mul(OneEGLD, big.NewInt(2500)) + // InitialAmount the variable for initial minting amount in account + InitialAmount = big.NewInt(0).Mul(OneEGLD, big.NewInt(100)) +) + +// GenerateTransaction will generate a transaction based on input data +func GenerateTransaction(sender []byte, nonce uint64, receiver []byte, value *big.Int, data string, gasLimit uint64) *transaction.Transaction { + return &transaction.Transaction{ + Nonce: nonce, + Value: value, + SndAddr: sender, + RcvAddr: receiver, + Data: []byte(data), + GasLimit: gasLimit, + GasPrice: minGasPrice, + ChainID: []byte(configs.ChainID), + Version: txVersion, + Signature: []byte(mockTxSignature), + } +} diff --git a/integrationTests/chainSimulator/interface.go b/integrationTests/chainSimulator/interface.go new file mode 100644 index 00000000000..7aba83c5103 --- /dev/null +++ b/integrationTests/chainSimulator/interface.go @@ -0,0 +1,31 @@ +package chainSimulator + +import ( + "math/big" + + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + + "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/transaction" + crypto "github.com/multiversx/mx-chain-crypto-go" +) + +// ChainSimulator defines the operations for an entity that can simulate operations of a chain +type ChainSimulator interface { + GenerateBlocks(numOfBlocks int) error + GenerateBlocksUntilEpochIsReached(targetEpoch int32) error + AddValidatorKeys(validatorsPrivateKeys [][]byte) error + GetNodeHandler(shardID uint32) process.NodeHandler + RemoveAccounts(addresses []string) error + SendTxAndGenerateBlockTilTxIsExecuted(txToSend *transaction.Transaction, maxNumOfBlockToGenerateWhenExecutingTx int) (*transaction.ApiTransactionResult, error) + SendTxsAndGenerateBlocksTilAreExecuted(txsToSend []*transaction.Transaction, maxNumOfBlocksToGenerateWhenExecutingTx int) ([]*transaction.ApiTransactionResult, error) + SetStateMultiple(stateSlice []*dtos.AddressState) error + 
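Note (illustrative sketch, not part of the patch): the hunks above change RecreateTrie/Recreate call sites to take a root-hashes holder built with holders.NewDefaultRootHashesHolder instead of a raw root-hash byte slice. The helper below shows the new call shape only; it assumes the adapter at these call sites satisfies the repository's state.AccountsAdapter interface, and the package and function names are hypothetical.

package example

import (
	"github.com/multiversx/mx-chain-go/common/holders"
	"github.com/multiversx/mx-chain-go/state"
)

// recreateAt rebuilds the accounts trie at the given root hash using the
// holder-based API shown in this patch.
func recreateAt(accounts state.AccountsAdapter, rootHash []byte) error {
	return accounts.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash))
}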
GenerateAndMintWalletAddress(targetShardID uint32, value *big.Int) (dtos.WalletAddress, error) + GetInitialWalletKeys() *dtos.InitialWalletKeys + GetAccount(address dtos.WalletAddress) (api.AccountResponse, error) + ForceResetValidatorStatisticsCache() error + GetValidatorPrivateKeys() []crypto.PrivateKey + SetKeyValueForAddress(address string, keyValueMap map[string]string) error + Close() +} diff --git a/integrationTests/chainSimulator/relayedTx/relayedTx_test.go b/integrationTests/chainSimulator/relayedTx/relayedTx_test.go new file mode 100644 index 00000000000..72bc9575763 --- /dev/null +++ b/integrationTests/chainSimulator/relayedTx/relayedTx_test.go @@ -0,0 +1,707 @@ +package relayedTx + +import ( + "encoding/hex" + "encoding/json" + "math/big" + "strconv" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + testsChainSimulator "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/stretchr/testify/require" +) + +const ( + defaultPathToInitialConfig = "../../../cmd/node/config/" + minGasPrice = 1_000_000_000 + minGasLimit = 50_000 + guardAccountCost = 250_000 + extraGasLimitForGuarded = minGasLimit + txVersion = 2 + mockTxSignature = "sig" + maxNumOfBlocksToGenerateWhenExecutingTx = 10 + roundsPerEpoch = 30 +) + +var ( + oneEGLD = big.NewInt(1000000000000000000) + alterConfigsFuncRelayedV3EarlyActivation = func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.RelayedTransactionsV3EnableEpoch = 1 + cfg.EpochConfig.EnableEpochs.FixRelayedBaseCostEnableEpoch = 1 + } +) + +func TestRelayedTransactionInMultiShardEnvironmentWithChainSimulator(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + cs := startChainSimulator(t, alterConfigsFuncRelayedV3EarlyActivation) + defer cs.Close() + + initialBalance := big.NewInt(0).Mul(oneEGLD, big.NewInt(30000)) + relayer, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + sender, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + receiver, err := cs.GenerateAndMintWalletAddress(1, big.NewInt(0)) + require.NoError(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + innerTx := generateTransaction(sender.Bytes, 0, receiver.Bytes, oneEGLD, "", minGasLimit) + innerTx.RelayerAddr = relayer.Bytes + + sender2, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + receiver2, err := cs.GenerateAndMintWalletAddress(0, big.NewInt(0)) + require.NoError(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + innerTx2 := generateTransaction(sender2.Bytes, 0, receiver2.Bytes, oneEGLD, "", minGasLimit) + innerTx2.RelayerAddr = 
relayer.Bytes + + pkConv := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter() + + // innerTx3Failure should fail due to less gas limit + // deploy a wrapper contract + owner, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + scCode := wasm.GetSCCode("testData/egld-esdt-swap.wasm") + params := []string{scCode, wasm.VMTypeHex, wasm.DummyCodeMetadataHex, hex.EncodeToString([]byte("WEGLD"))} + txDataDeploy := strings.Join(params, "@") + deployTx := generateTransaction(owner.Bytes, 0, make([]byte, 32), big.NewInt(0), txDataDeploy, 600000000) + + result, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(deployTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + scAddress := result.Logs.Events[0].Address + scAddressBytes, _ := pkConv.Decode(scAddress) + + // try a wrap transaction which will fail as the contract is paused + txDataWrap := "wrapEgld" + gasLimit := 2300000 + innerTx3Failure := generateTransaction(owner.Bytes, 1, scAddressBytes, big.NewInt(1), txDataWrap, uint64(gasLimit)) + innerTx3Failure.RelayerAddr = relayer.Bytes + + innerTx3 := generateTransaction(sender.Bytes, 1, receiver2.Bytes, oneEGLD, "", minGasLimit) + innerTx3.RelayerAddr = relayer.Bytes + + innerTxs := []*transaction.Transaction{innerTx, innerTx2, innerTx3Failure, innerTx3} + + // relayer will consume first a move balance for each inner tx, then the specific gas for each inner tx + relayedTxGasLimit := uint64(0) + for _, tx := range innerTxs { + relayedTxGasLimit += minGasLimit + tx.GasLimit + } + relayedTx := generateTransaction(relayer.Bytes, 0, relayer.Bytes, big.NewInt(0), "", relayedTxGasLimit) + relayedTx.InnerTransactions = innerTxs + + result, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(relayedTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + // generate few more blocks for the cross shard scrs to be done + err = cs.GenerateBlocks(maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + economicsData := cs.GetNodeHandler(0).GetCoreComponents().EconomicsData() + relayerMoveBalanceFee := economicsData.ComputeMoveBalanceFee(relayedTx) + expectedRelayerFee := big.NewInt(0).Mul(relayerMoveBalanceFee, big.NewInt(int64(len(relayedTx.InnerTransactions)))) + for _, tx := range innerTxs { + expectedRelayerFee.Add(expectedRelayerFee, economicsData.ComputeTxFee(tx)) + } + checkBalance(t, cs, relayer, big.NewInt(0).Sub(initialBalance, expectedRelayerFee)) + + checkBalance(t, cs, sender, big.NewInt(0).Sub(initialBalance, big.NewInt(0).Mul(oneEGLD, big.NewInt(2)))) + + checkBalance(t, cs, sender2, big.NewInt(0).Sub(initialBalance, oneEGLD)) + + checkBalance(t, cs, receiver, oneEGLD) + + checkBalance(t, cs, receiver2, big.NewInt(0).Mul(oneEGLD, big.NewInt(2))) + + // check SCRs + shardC := cs.GetNodeHandler(0).GetShardCoordinator() + for _, scr := range result.SmartContractResults { + checkSCRSucceeded(t, cs, pkConv, shardC, scr) + } + + // 3 log events from the failed sc call + require.Equal(t, 3, len(result.Logs.Events)) + require.True(t, strings.Contains(string(result.Logs.Events[2].Data), "contract is paused")) +} + +func TestRelayedTransactionInMultiShardEnvironmentWithChainSimulatorScCalls(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + cs := startChainSimulator(t, alterConfigsFuncRelayedV3EarlyActivation) + defer cs.Close() + + initialBalance := big.NewInt(0).Mul(oneEGLD, big.NewInt(10)) + relayer, err := 
cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + pkConv := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter() + shardC := cs.GetNodeHandler(0).GetShardCoordinator() + + // deploy adder contract + owner, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + ownerNonce := uint64(0) + scCode := wasm.GetSCCode("testData/adder.wasm") + params := []string{scCode, wasm.VMTypeHex, wasm.DummyCodeMetadataHex, "00"} + txDataDeploy := strings.Join(params, "@") + deployTx := generateTransaction(owner.Bytes, ownerNonce, make([]byte, 32), big.NewInt(0), txDataDeploy, 100000000) + + result, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(deployTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + scAddress := result.Logs.Events[0].Address + scAddressBytes, _ := pkConv.Decode(scAddress) + scShard := shardC.ComputeId(scAddressBytes) + scShardNodeHandler := cs.GetNodeHandler(scShard) + + // 1st inner tx, successful add 1 + ownerNonce++ + txDataAdd := "add@" + hex.EncodeToString(big.NewInt(1).Bytes()) + innerTx1 := generateTransaction(owner.Bytes, ownerNonce, scAddressBytes, big.NewInt(0), txDataAdd, 5000000) + innerTx1.RelayerAddr = relayer.Bytes + + // 2nd inner tx, successful add 1 + ownerNonce++ + innerTx2 := generateTransaction(owner.Bytes, ownerNonce, scAddressBytes, big.NewInt(0), txDataAdd, 5000000) + innerTx2.RelayerAddr = relayer.Bytes + + // 3rd inner tx, wrong number of parameters + ownerNonce++ + innerTx3 := generateTransaction(owner.Bytes, ownerNonce, scAddressBytes, big.NewInt(0), "add", 5000000) + innerTx3.RelayerAddr = relayer.Bytes + + // 4th inner tx, successful add 1 + ownerNonce++ + innerTx4 := generateTransaction(owner.Bytes, ownerNonce, scAddressBytes, big.NewInt(0), txDataAdd, 5000000) + innerTx4.RelayerAddr = relayer.Bytes + + // 5th inner tx, invalid function + ownerNonce++ + innerTx5 := generateTransaction(owner.Bytes, ownerNonce, scAddressBytes, big.NewInt(0), "substract", 5000000) + innerTx5.RelayerAddr = relayer.Bytes + + // 6th inner tx, successful add 1 + ownerNonce++ + innerTx6 := generateTransaction(owner.Bytes, ownerNonce, scAddressBytes, big.NewInt(0), txDataAdd, 5000000) + innerTx6.RelayerAddr = relayer.Bytes + + // 7th inner tx, not enough gas + ownerNonce++ + innerTx7 := generateTransaction(owner.Bytes, ownerNonce, scAddressBytes, big.NewInt(0), txDataAdd, 100000) + innerTx7.RelayerAddr = relayer.Bytes + + innerTxs := []*transaction.Transaction{innerTx1, innerTx2, innerTx3, innerTx4, innerTx5, innerTx6, innerTx7} + + relayedTxGasLimit := uint64(0) + for _, tx := range innerTxs { + relayedTxGasLimit += minGasLimit + tx.GasLimit + } + relayedTx := generateTransaction(relayer.Bytes, 0, relayer.Bytes, big.NewInt(0), "", relayedTxGasLimit) + relayedTx.InnerTransactions = innerTxs + + result, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(relayedTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + checkSum(t, scShardNodeHandler, scAddressBytes, owner.Bytes, 4) + + // 8 scrs, 4 from the succeeded txs + 4 with refunded gas to relayer + require.Equal(t, 8, len(result.SmartContractResults)) + for _, scr := range result.SmartContractResults { + if strings.Contains(scr.ReturnMessage, "gas refund for relayer") { + continue + } + + checkSCRSucceeded(t, cs, pkConv, shardC, scr) + } + + // 6 events, 3 with signalError + 3 with the actual errors + require.Equal(t, 6, len(result.Logs.Events)) + expectedLogEvents := 
map[int]string{ + 1: "[wrong number of arguments]", + 3: "[invalid function (not found)] [substract]", + 5: "[not enough gas] [add]", + } + for idx, logEvent := range result.Logs.Events { + if logEvent.Identifier == "signalError" { + continue + } + + expectedLogEvent := expectedLogEvents[idx] + require.True(t, strings.Contains(string(logEvent.Data), expectedLogEvent)) + } +} + +func TestFixRelayedMoveBalanceWithChainSimulator(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + expectedFeeScCallBefore := "815294920000000" + expectedFeeScCallAfter := "873704920000000" + t.Run("sc call", testFixRelayedMoveBalanceWithChainSimulatorScCall(expectedFeeScCallBefore, expectedFeeScCallAfter)) + + expectedFeeMoveBalanceBefore := "797500000000000" // 498 * 1500 + 50000 + 5000 + expectedFeeMoveBalanceAfter := "847000000000000" // 498 * 1500 + 50000 + 50000 + t.Run("move balance", testFixRelayedMoveBalanceWithChainSimulatorMoveBalance(expectedFeeMoveBalanceBefore, expectedFeeMoveBalanceAfter)) +} + +func testFixRelayedMoveBalanceWithChainSimulatorScCall( + expectedFeeBeforeFix string, + expectedFeeAfterFix string, +) func(t *testing.T) { + return func(t *testing.T) { + + providedActivationEpoch := uint32(7) + alterConfigsFunc := func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.FixRelayedBaseCostEnableEpoch = providedActivationEpoch + } + + cs := startChainSimulator(t, alterConfigsFunc) + defer cs.Close() + + pkConv := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter() + + initialBalance := big.NewInt(0).Mul(oneEGLD, big.NewInt(10)) + relayer, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + // deploy adder contract + owner, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + // generate one block so the minting has effect + err = cs.GenerateBlocks(1) + require.NoError(t, err) + + scCode := wasm.GetSCCode("testData/adder.wasm") + params := []string{scCode, wasm.VMTypeHex, wasm.DummyCodeMetadataHex, "00"} + txDataDeploy := strings.Join(params, "@") + deployTx := generateTransaction(owner.Bytes, 0, make([]byte, 32), big.NewInt(0), txDataDeploy, 100000000) + + result, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(deployTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + scAddress := result.Logs.Events[0].Address + scAddressBytes, _ := pkConv.Decode(scAddress) + + // fast-forward until epoch 4 + err = cs.GenerateBlocksUntilEpochIsReached(int32(4)) + require.NoError(t, err) + + // send relayed tx + txDataAdd := "add@" + hex.EncodeToString(big.NewInt(1).Bytes()) + innerTx := generateTransaction(owner.Bytes, 1, scAddressBytes, big.NewInt(0), txDataAdd, 3000000) + marshalledTx, err := json.Marshal(innerTx) + require.NoError(t, err) + txData := []byte("relayedTx@" + hex.EncodeToString(marshalledTx)) + gasLimit := 50000 + uint64(len(txData))*1500 + innerTx.GasLimit + + relayedTx := generateTransaction(relayer.Bytes, 0, owner.Bytes, big.NewInt(0), string(txData), gasLimit) + + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(relayedTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + // send relayed tx, fix still not active + innerTx = generateTransaction(owner.Bytes, 2, scAddressBytes, big.NewInt(0), txDataAdd, 3000000) + marshalledTx, err = json.Marshal(innerTx) + require.NoError(t, err) + txData = []byte("relayedTx@" + hex.EncodeToString(marshalledTx)) + gasLimit = 50000 + uint64(len(txData))*1500 + innerTx.GasLimit + + relayedTx = 
generateTransaction(relayer.Bytes, 1, owner.Bytes, big.NewInt(0), string(txData), gasLimit) + + relayerBalanceBefore := getBalance(t, cs, relayer) + + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(relayedTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + relayerBalanceAfter := getBalance(t, cs, relayer) + + feeConsumed := big.NewInt(0).Sub(relayerBalanceBefore, relayerBalanceAfter) + + require.Equal(t, expectedFeeBeforeFix, feeConsumed.String()) + + // fast-forward until the fix is active + err = cs.GenerateBlocksUntilEpochIsReached(int32(providedActivationEpoch)) + require.NoError(t, err) + + // send relayed tx after fix + innerTx = generateTransaction(owner.Bytes, 3, scAddressBytes, big.NewInt(0), txDataAdd, 3000000) + marshalledTx, err = json.Marshal(innerTx) + require.NoError(t, err) + txData = []byte("relayedTx@" + hex.EncodeToString(marshalledTx)) + gasLimit = 50000 + uint64(len(txData))*1500 + innerTx.GasLimit + + relayedTx = generateTransaction(relayer.Bytes, 2, owner.Bytes, big.NewInt(0), string(txData), gasLimit) + + relayerBalanceBefore = getBalance(t, cs, relayer) + + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(relayedTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + relayerBalanceAfter = getBalance(t, cs, relayer) + + feeConsumed = big.NewInt(0).Sub(relayerBalanceBefore, relayerBalanceAfter) + + require.Equal(t, expectedFeeAfterFix, feeConsumed.String()) + } +} + +func testFixRelayedMoveBalanceWithChainSimulatorMoveBalance( + expectedFeeBeforeFix string, + expectedFeeAfterFix string, +) func(t *testing.T) { + return func(t *testing.T) { + + providedActivationEpoch := uint32(5) + alterConfigsFunc := func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.FixRelayedBaseCostEnableEpoch = providedActivationEpoch + } + + cs := startChainSimulator(t, alterConfigsFunc) + defer cs.Close() + + initialBalance := big.NewInt(0).Mul(oneEGLD, big.NewInt(10)) + relayer, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + sender, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + receiver, err := cs.GenerateAndMintWalletAddress(0, big.NewInt(0)) + require.NoError(t, err) + + // generate one block so the minting has effect + err = cs.GenerateBlocks(1) + require.NoError(t, err) + + // send relayed tx + innerTx := generateTransaction(sender.Bytes, 0, receiver.Bytes, oneEGLD, "", 50000) + marshalledTx, err := json.Marshal(innerTx) + require.NoError(t, err) + txData := []byte("relayedTx@" + hex.EncodeToString(marshalledTx)) + gasLimit := 50000 + uint64(len(txData))*1500 + innerTx.GasLimit + + relayedTx := generateTransaction(relayer.Bytes, 0, sender.Bytes, big.NewInt(0), string(txData), gasLimit) + + relayerBalanceBefore := getBalance(t, cs, relayer) + + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(relayedTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + relayerBalanceAfter := getBalance(t, cs, relayer) + + feeConsumed := big.NewInt(0).Sub(relayerBalanceBefore, relayerBalanceAfter) + + require.Equal(t, expectedFeeBeforeFix, feeConsumed.String()) + + // fast-forward until the fix is active + err = cs.GenerateBlocksUntilEpochIsReached(int32(providedActivationEpoch)) + require.NoError(t, err) + + // send relayed tx + innerTx = generateTransaction(sender.Bytes, 1, receiver.Bytes, oneEGLD, "", 50000) + marshalledTx, err = json.Marshal(innerTx) + require.NoError(t, err) + txData = []byte("relayedTx@" + hex.EncodeToString(marshalledTx)) + 
gasLimit = 50000 + uint64(len(txData))*1500 + innerTx.GasLimit + + relayedTx = generateTransaction(relayer.Bytes, 1, sender.Bytes, big.NewInt(0), string(txData), gasLimit) + + relayerBalanceBefore = getBalance(t, cs, relayer) + + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(relayedTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + relayerBalanceAfter = getBalance(t, cs, relayer) + + feeConsumed = big.NewInt(0).Sub(relayerBalanceBefore, relayerBalanceAfter) + + require.Equal(t, expectedFeeAfterFix, feeConsumed.String()) + } +} + +func TestRelayedTransactionInMultiShardEnvironmentWithChainSimulatorInnerNotExecutable(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + cs := startChainSimulator(t, alterConfigsFuncRelayedV3EarlyActivation) + defer cs.Close() + + initialBalance := big.NewInt(0).Mul(oneEGLD, big.NewInt(10)) + relayer, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + sender, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + sender2, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + guardian, err := cs.GenerateAndMintWalletAddress(0, initialBalance) + require.NoError(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + // Set guardian for sender + senderNonce := uint64(0) + setGuardianTxData := "SetGuardian@" + hex.EncodeToString(guardian.Bytes) + "@" + hex.EncodeToString([]byte("uuid")) + setGuardianGasLimit := minGasLimit + 1500*len(setGuardianTxData) + guardAccountCost + setGuardianTx := generateTransaction(sender.Bytes, senderNonce, sender.Bytes, big.NewInt(0), setGuardianTxData, uint64(setGuardianGasLimit)) + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(setGuardianTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + // fast-forward until the guardian becomes active + err = cs.GenerateBlocks(roundsPerEpoch * 20) + require.NoError(t, err) + + // guard account + senderNonce++ + guardAccountTxData := "GuardAccount" + guardAccountGasLimit := minGasLimit + 1500*len(guardAccountTxData) + guardAccountCost + guardAccountTx := generateTransaction(sender.Bytes, senderNonce, sender.Bytes, big.NewInt(0), guardAccountTxData, uint64(guardAccountGasLimit)) + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(guardAccountTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + receiver, err := cs.GenerateAndMintWalletAddress(1, big.NewInt(0)) + require.NoError(t, err) + + // move balance inner transaction non-executable due to guardian mismatch + senderNonce++ + innerTx := generateTransaction(sender.Bytes, senderNonce, receiver.Bytes, oneEGLD, "", minGasLimit+extraGasLimitForGuarded) + innerTx.RelayerAddr = relayer.Bytes + innerTx.GuardianAddr = sender.Bytes // this is not the real guardian + innerTx.GuardianSignature = []byte(mockTxSignature) + innerTx.Options = 2 + + // move balance inner transaction non-executable due to higher nonce + nonceTooHigh := uint64(100) + innerTx2 := generateTransaction(sender2.Bytes, nonceTooHigh, receiver.Bytes, oneEGLD, "", minGasLimit) + innerTx2.RelayerAddr = relayer.Bytes + + innerTxs := []*transaction.Transaction{innerTx, innerTx2} + + // relayer will consume first a move balance for each inner tx, then the specific gas for each inner tx + relayedTxGasLimit := uint64(0) + for _, tx := range innerTxs { + relayedTxGasLimit += minGasLimit + tx.GasLimit + } + relayedTx := generateTransaction(relayer.Bytes, 0, relayer.Bytes, 
big.NewInt(0), "", relayedTxGasLimit) + relayedTx.InnerTransactions = innerTxs + + result, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(relayedTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + // generate few more blocks for the cross shard scrs to be done + err = cs.GenerateBlocks(maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + // check the inner tx failed with the desired error + require.Equal(t, 2, len(result.SmartContractResults)) + require.True(t, strings.Contains(result.SmartContractResults[0].ReturnMessage, process.ErrTransactionNotExecutable.Error())) + require.True(t, strings.Contains(result.SmartContractResults[0].ReturnMessage, process.ErrTransactionAndAccountGuardianMismatch.Error())) + require.True(t, strings.Contains(result.SmartContractResults[1].ReturnMessage, process.ErrHigherNonceInTransaction.Error())) + + // check events + require.Equal(t, 2, len(result.Logs.Events)) + for _, event := range result.Logs.Events { + require.Equal(t, core.SignalErrorOperation, event.Identifier) + } + + // compute expected consumed fee for relayer + expectedConsumedGasForGuardedInnerTx := minGasLimit + minGasLimit + extraGasLimitForGuarded // invalid guardian + expectedConsumedGasForHigherNonceInnerTx := minGasLimit + minGasLimit // higher nonce + expectedConsumeGas := expectedConsumedGasForGuardedInnerTx + expectedConsumedGasForHigherNonceInnerTx + expectedRelayerFee := core.SafeMul(uint64(expectedConsumeGas), minGasPrice) + checkBalance(t, cs, relayer, big.NewInt(0).Sub(initialBalance, expectedRelayerFee)) + + checkBalance(t, cs, receiver, big.NewInt(0)) + + relayerBalanceBeforeSuccessfullAttempt := getBalance(t, cs, relayer) + + // generate a valid guarded move balance inner tx + // senderNonce would be the same, as previous failed tx didn't increase it(expected) + innerTx = generateTransaction(sender.Bytes, senderNonce, receiver.Bytes, oneEGLD, "", minGasLimit+extraGasLimitForGuarded) + innerTx.RelayerAddr = relayer.Bytes + innerTx.GuardianAddr = guardian.Bytes + innerTx.GuardianSignature = []byte(mockTxSignature) + innerTx.Options = 2 + + innerTxs = []*transaction.Transaction{innerTx} + relayedTx = generateTransaction(relayer.Bytes, 1, relayer.Bytes, big.NewInt(0), "", relayedTxGasLimit) + relayedTx.InnerTransactions = innerTxs + + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(relayedTx, maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + // generate few more blocks for the cross shard scrs to be done + err = cs.GenerateBlocks(maxNumOfBlocksToGenerateWhenExecutingTx) + require.NoError(t, err) + + expectedRelayerFee = core.SafeMul(uint64(expectedConsumedGasForGuardedInnerTx), minGasPrice) + checkBalance(t, cs, relayer, big.NewInt(0).Sub(relayerBalanceBeforeSuccessfullAttempt, expectedRelayerFee)) + + checkBalance(t, cs, receiver, oneEGLD) +} + +func startChainSimulator( + t *testing.T, + alterConfigsFunction func(cfg *config.Configs), +) testsChainSimulator.ChainSimulator { + roundDurationInMillis := uint64(6000) + roundsPerEpochOpt := core.OptionalUint64{ + HasValue: true, + Value: roundsPerEpoch, + } + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpochOpt, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, 
+ NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: alterConfigsFunction, + }) + require.NoError(t, err) + require.NotNil(t, cs) + + err = cs.GenerateBlocksUntilEpochIsReached(1) + require.NoError(t, err) + + return cs +} + +func generateTransaction(sender []byte, nonce uint64, receiver []byte, value *big.Int, data string, gasLimit uint64) *transaction.Transaction { + return &transaction.Transaction{ + Nonce: nonce, + Value: value, + SndAddr: sender, + RcvAddr: receiver, + Data: []byte(data), + GasLimit: gasLimit, + GasPrice: minGasPrice, + ChainID: []byte(configs.ChainID), + Version: txVersion, + Signature: []byte(mockTxSignature), + } +} + +func checkSum( + t *testing.T, + nodeHandler chainSimulatorProcess.NodeHandler, + scAddress []byte, + callerAddress []byte, + expectedSum int, +) { + scQuery := &process.SCQuery{ + ScAddress: scAddress, + FuncName: "getSum", + CallerAddr: callerAddress, + CallValue: big.NewInt(0), + } + result, _, err := nodeHandler.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, "ok", result.ReturnCode) + + sum, err := strconv.Atoi(hex.EncodeToString(result.ReturnData[0])) + require.NoError(t, err) + + require.Equal(t, expectedSum, sum) +} + +func checkSCRSucceeded( + t *testing.T, + cs testsChainSimulator.ChainSimulator, + pkConv core.PubkeyConverter, + shardC sharding.Coordinator, + scr *transaction.ApiSmartContractResult, +) { + addr, err := pkConv.Decode(scr.RcvAddr) + require.NoError(t, err) + + senderShard := shardC.ComputeId(addr) + tx, err := cs.GetNodeHandler(senderShard).GetFacadeHandler().GetTransaction(scr.Hash, true) + require.NoError(t, err) + require.Equal(t, transaction.TxStatusSuccess, tx.Status) + + if tx.ReturnMessage == core.GasRefundForRelayerMessage { + return + } + + require.GreaterOrEqual(t, len(tx.Logs.Events), 1) + for _, event := range tx.Logs.Events { + if event.Identifier == core.WriteLogIdentifier { + continue + } + + require.Equal(t, core.CompletedTxEventIdentifier, event.Identifier) + } +} + +func getBalance( + t *testing.T, + cs testsChainSimulator.ChainSimulator, + address dtos.WalletAddress, +) *big.Int { + account, err := cs.GetAccount(address) + require.NoError(t, err) + + balance, ok := big.NewInt(0).SetString(account.Balance, 10) + require.True(t, ok) + + return balance +} + +func checkBalance( + t *testing.T, + cs testsChainSimulator.ChainSimulator, + address dtos.WalletAddress, + expectedBalance *big.Int, +) { + balance := getBalance(t, cs, address) + require.Equal(t, expectedBalance.String(), balance.String()) +} diff --git a/integrationTests/chainSimulator/relayedTx/testData/adder.wasm b/integrationTests/chainSimulator/relayedTx/testData/adder.wasm new file mode 100644 index 00000000000..b6bc9b4e13b Binary files /dev/null and b/integrationTests/chainSimulator/relayedTx/testData/adder.wasm differ diff --git a/integrationTests/chainSimulator/relayedTx/testData/egld-esdt-swap.wasm b/integrationTests/chainSimulator/relayedTx/testData/egld-esdt-swap.wasm new file mode 100644 index 00000000000..7244307f1cc Binary files /dev/null and b/integrationTests/chainSimulator/relayedTx/testData/egld-esdt-swap.wasm differ diff --git a/integrationTests/chainSimulator/staking/common.go b/integrationTests/chainSimulator/staking/common.go new file mode 100644 index 00000000000..4de97df500e --- /dev/null +++ b/integrationTests/chainSimulator/staking/common.go @@ -0,0 +1,104 @@ +package staking + +import ( + "encoding/hex" + "math/big" + "testing" + + 
chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/vm" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/stretchr/testify/require" +) + +const ( + // MockBLSSignature the const for a mocked bls signature + MockBLSSignature = "010101" + // GasLimitForStakeOperation the const for the gas limit value for the stake operation + GasLimitForStakeOperation = 50_000_000 + // GasLimitForUnBond the const for the gas limit value for the unBond operation + GasLimitForUnBond = 12_000_000 + // MaxNumOfBlockToGenerateWhenExecutingTx the const for the maximum number of block to generate when execute a transaction + MaxNumOfBlockToGenerateWhenExecutingTx = 7 + + // QueuedStatus the const for the queued status of a validators + QueuedStatus = "queued" + // StakedStatus the const for the staked status of a validators + StakedStatus = "staked" + // NotStakedStatus the const for the notStaked status of a validators + NotStakedStatus = "notStaked" + // AuctionStatus the const for the action status of a validators + AuctionStatus = "auction" + // UnStakedStatus the const for the unStaked status of a validators + UnStakedStatus = "unStaked" +) + +var ( + //InitialDelegationValue the variable for the initial delegation value + InitialDelegationValue = big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(1250)) +) + +// GetNonce will return the nonce of the provided address +func GetNonce(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, address dtos.WalletAddress) uint64 { + account, err := cs.GetAccount(address) + require.Nil(t, err) + + return account.Nonce +} + +// GetBLSKeyStatus will return the bls key status +func GetBLSKeyStatus(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) string { + scQuery := &process.SCQuery{ + ScAddress: vm.StakingSCAddress, + FuncName: "getBLSKeyStatus", + CallerAddr: vm.StakingSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) + + return string(result.ReturnData[0]) +} + +// GetAllNodeStates will return the status of all the nodes that belong to the provided address +func GetAllNodeStates(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte) map[string]string { + scQuery := &process.SCQuery{ + ScAddress: address, + FuncName: "getAllNodeStates", + CallerAddr: vm.StakingSCAddress, + CallValue: big.NewInt(0), + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) + + m := make(map[string]string) + status := "" + for _, resultData := range result.ReturnData { + if len(resultData) != 96 { + // not a BLS key + status = string(resultData) + continue + } + + m[hex.EncodeToString(resultData)] = status + } + + return m +} + +// CheckValidatorStatus will compare the status of the provided bls key with the provided expected status +func CheckValidatorStatus(t *testing.T, cs 
chainSimulatorIntegrationTests.ChainSimulator, blsKey string, expectedStatus string) {
+	err := cs.ForceResetValidatorStatisticsCache()
+	require.Nil(t, err)
+
+	validatorsStatistics, err := cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ValidatorStatisticsApi()
+	require.Nil(t, err)
+	require.Equal(t, expectedStatus, validatorsStatistics[blsKey].ValidatorStatus)
+}
diff --git a/integrationTests/chainSimulator/staking/jail/jail_test.go b/integrationTests/chainSimulator/staking/jail/jail_test.go
new file mode 100644
index 00000000000..bb449da993f
--- /dev/null
+++ b/integrationTests/chainSimulator/staking/jail/jail_test.go
@@ -0,0 +1,257 @@
+package jail
+
+import (
+	"encoding/hex"
+	"fmt"
+	"math/big"
+	"testing"
+	"time"
+
+	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/config"
+	chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator"
+	"github.com/multiversx/mx-chain-go/integrationTests/chainSimulator/staking"
+	"github.com/multiversx/mx-chain-go/node/chainSimulator"
+	"github.com/multiversx/mx-chain-go/node/chainSimulator/components/api"
+	"github.com/multiversx/mx-chain-go/node/chainSimulator/configs"
+	"github.com/multiversx/mx-chain-go/vm"
+
+	"github.com/multiversx/mx-chain-core-go/core"
+	"github.com/multiversx/mx-chain-core-go/data/transaction"
+	"github.com/stretchr/testify/require"
+)
+
+const (
+	stakingV4JailUnJailStep1EnableEpoch = 5
+	defaultPathToInitialConfig = "../../../../cmd/node/config/"
+
+	epochWhenNodeIsJailed = 4
+)
+
+// Test description
+// All test cases will do a stake transaction and wait till the new node is jailed
+// testcase1 -- unJail transaction will be sent when staking v3.5 is still active --> node status should be `new` after unjail
+// testcase2 -- unJail transaction will be sent when staking v4 step1 is active --> node status should be `auction` after unjail
+// testcase3 -- unJail transaction will be sent when staking v4 step2 is active --> node status should be `auction` after unjail
+// testcase4 -- unJail transaction will be sent when staking v4 step3 is active --> node status should be `auction` after unjail
+func TestChainSimulator_ValidatorJailUnJail(t *testing.T) {
+	if testing.Short() {
+		t.Skip("this is not a short test")
+	}
+
+	t.Run("staking ph 4 is not active", func(t *testing.T) {
+		testChainSimulatorJailAndUnJail(t, 4, "new")
+	})
+
+	t.Run("staking ph 4 step 1 active", func(t *testing.T) {
+		testChainSimulatorJailAndUnJail(t, 5, "auction")
+	})
+
+	t.Run("staking ph 4 step 2 active", func(t *testing.T) {
+		testChainSimulatorJailAndUnJail(t, 6, "auction")
+	})
+
+	t.Run("staking ph 4 step 3 active", func(t *testing.T) {
+		testChainSimulatorJailAndUnJail(t, 7, "auction")
+	})
+}
+
+func testChainSimulatorJailAndUnJail(t *testing.T, targetEpoch int32, nodeStatusAfterUnJail string) {
+	startTime := time.Now().Unix()
+	roundDurationInMillis := uint64(6000)
+	roundsPerEpoch := core.OptionalUint64{
+		HasValue: true,
+		Value: 20,
+	}
+
+	numOfShards := uint32(3)
+
+	cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{
+		BypassTxSignatureCheck: true,
+		TempDir: t.TempDir(),
+		PathToInitialConfig: defaultPathToInitialConfig,
+		NumOfShards: numOfShards,
+		GenesisTimestamp: startTime,
+		RoundDurationInMillis: roundDurationInMillis,
+		
RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 2, + MetaChainMinNodes: 2, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, stakingV4JailUnJailStep1EnableEpoch) + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 8 // 8 nodes until new nodes will be placed on queue + configs.SetMaxNumberOfNodesInConfigs(cfg, uint32(newNumNodes), 0, numOfShards) + configs.SetQuickJailRatingConfig(cfg) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.GenerateBlocksUntilEpochIsReached(1) + require.Nil(t, err) + + _, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + mintValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(3000)) + walletAddress, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(walletAddress.Bytes, 0, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + // wait node to be jailed + err = cs.GenerateBlocksUntilEpochIsReached(epochWhenNodeIsJailed) + require.Nil(t, err) + + decodedBLSKey, _ := hex.DecodeString(blsKeys[0]) + status := staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey) + require.Equal(t, "jailed", status) + + // do an unjail transaction + unJailValue, _ := big.NewInt(0).SetString("2500000000000000000", 10) + txUnJailDataField := fmt.Sprintf("unJail@%s", blsKeys[0]) + txUnJail := chainSimulatorIntegrationTests.GenerateTransaction(walletAddress.Bytes, 1, vm.ValidatorSCAddress, unJailValue, txUnJailDataField, staking.GasLimitForStakeOperation) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + unJailTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnJail, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unJailTx) + require.Equal(t, transaction.TxStatusSuccess, unJailTx.Status) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + status = staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey) + require.Equal(t, "staked", status) + + staking.CheckValidatorStatus(t, cs, blsKeys[0], nodeStatusAfterUnJail) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + staking.CheckValidatorStatus(t, cs, blsKeys[0], "waiting") + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 2) + require.Nil(t, err) + + staking.CheckValidatorStatus(t, cs, blsKeys[0], "eligible") +} + +// Test description +// Add a new node and wait until the node get jailed +// Add a second node to take the place of the jailed node +// UnJail the first node --> should go in queue +// Activate staking v4 step 1 --> node should be unstaked as the queue is cleaned up + +// Internal test scenario #2 +func TestChainSimulator_FromQueueToAuctionList(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + 
HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, stakingV4JailUnJailStep1EnableEpoch) + configs.SetQuickJailRatingConfig(cfg) + + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 1 + configs.SetMaxNumberOfNodesInConfigs(cfg, uint32(newNumNodes), 0, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.GenerateBlocks(30) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys([][]byte{privateKeys[1]}) + require.Nil(t, err) + + mintValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(6000)) + walletAddress, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(walletAddress.Bytes, 0, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + // wait node to be jailed + err = cs.GenerateBlocksUntilEpochIsReached(epochWhenNodeIsJailed) + require.Nil(t, err) + + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + status := staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "jailed", status) + + // add one more node + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], staking.MockBLSSignature) + txStake = chainSimulatorIntegrationTests.GenerateTransaction(walletAddress.Bytes, 1, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + decodedBLSKey1, _ := hex.DecodeString(blsKeys[1]) + status = staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey1) + require.Equal(t, "staked", status) + + // unJail the first node + unJailValue, _ := big.NewInt(0).SetString("2500000000000000000", 10) + txUnJailDataField := fmt.Sprintf("unJail@%s", blsKeys[0]) + txUnJail := chainSimulatorIntegrationTests.GenerateTransaction(walletAddress.Bytes, 2, vm.ValidatorSCAddress, unJailValue, txUnJailDataField, staking.GasLimitForStakeOperation) + + unJailTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnJail, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unJailTx) + require.Equal(t, transaction.TxStatusSuccess, unJailTx.Status) + + status = staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "queued", status) + + err = 
cs.GenerateBlocksUntilEpochIsReached(stakingV4JailUnJailStep1EnableEpoch)
+	require.Nil(t, err)
+
+	status = staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0)
+	require.Equal(t, staking.UnStakedStatus, status)
+
+	staking.CheckValidatorStatus(t, cs, blsKeys[0], string(common.InactiveList))
+}
diff --git a/integrationTests/chainSimulator/staking/stake/simpleStake_test.go b/integrationTests/chainSimulator/staking/stake/simpleStake_test.go
new file mode 100644
index 00000000000..bfc9f3c11b6
--- /dev/null
+++ b/integrationTests/chainSimulator/staking/stake/simpleStake_test.go
@@ -0,0 +1,298 @@
+package stake
+
+import (
+	"encoding/hex"
+	"fmt"
+	"math/big"
+	"testing"
+	"time"
+
+	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/config"
+	chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator"
+	"github.com/multiversx/mx-chain-go/integrationTests/chainSimulator/staking"
+	"github.com/multiversx/mx-chain-go/node/chainSimulator"
+	"github.com/multiversx/mx-chain-go/node/chainSimulator/components/api"
+	"github.com/multiversx/mx-chain-go/node/chainSimulator/configs"
+	"github.com/multiversx/mx-chain-go/node/chainSimulator/process"
+	"github.com/multiversx/mx-chain-go/vm"
+
+	"github.com/multiversx/mx-chain-core-go/core"
+	"github.com/multiversx/mx-chain-core-go/data/transaction"
+	"github.com/stretchr/testify/require"
+)
+
+// Test scenarios
+// Do 3 stake transactions from 3 different wallets - tx value 2499, 2500, 2501
+// testcase1 -- staking v3.5 --> tx1 fail, tx2 - node in queue, tx3 - node in queue with topUp 1
+// testcase2 -- staking v4 step1 --> tx1 fail, tx2 - node in auction, tx3 - node in auction with topUp 1
+// testcase3 -- staking v4 step2 --> tx1 fail, tx2 - node in auction, tx3 - node in auction with topUp 1
+// testcase4 -- staking v4 step3 --> tx1 fail, tx2 - node in auction, tx3 - node in auction with topUp 1
+
+// Internal test scenario #3
+func TestChainSimulator_SimpleStake(t *testing.T) {
+	if testing.Short() {
+		t.Skip("this is not a short test")
+	}
+
+	t.Run("staking ph 4 is not active", func(t *testing.T) {
+		testChainSimulatorSimpleStake(t, 1, "queued")
+	})
+
+	t.Run("staking ph 4 step1", func(t *testing.T) {
+		testChainSimulatorSimpleStake(t, 2, "auction")
+	})
+
+	t.Run("staking ph 4 step2", func(t *testing.T) {
+		testChainSimulatorSimpleStake(t, 3, "auction")
+	})
+
+	t.Run("staking ph 4 step3", func(t *testing.T) {
+		testChainSimulatorSimpleStake(t, 4, "auction")
+	})
+}
+
+func testChainSimulatorSimpleStake(t *testing.T, targetEpoch int32, nodesStatus string) {
+	if testing.Short() {
+		t.Skip("this is not a short test")
+	}
+	startTime := time.Now().Unix()
+	roundDurationInMillis := uint64(6000)
+	roundsPerEpoch := core.OptionalUint64{
+		HasValue: true,
+		Value: 20,
+	}
+
+	numOfShards := uint32(3)
+
+	cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{
+		BypassTxSignatureCheck: true,
+		TempDir: t.TempDir(),
+		PathToInitialConfig: defaultPathToInitialConfig,
+		NumOfShards: numOfShards,
+		GenesisTimestamp: startTime,
+		RoundDurationInMillis: roundDurationInMillis,
+		RoundsPerEpoch: roundsPerEpoch,
+		ApiInterface: api.NewNoApiInterface(),
+		MinNodesPerShard: 3,
+		MetaChainMinNodes: 3,
+		NumNodesWaitingListMeta: 3,
+		NumNodesWaitingListShard: 3,
+		
AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, 2) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + mintValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(3000)) + wallet1, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + wallet2, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + wallet3, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + _, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(3) + require.Nil(t, err) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + dataFieldTx1 := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + tx1Value := big.NewInt(0).Mul(big.NewInt(2499), chainSimulatorIntegrationTests.OneEGLD) + tx1 := chainSimulatorIntegrationTests.GenerateTransaction(wallet1.Bytes, 0, vm.ValidatorSCAddress, tx1Value, dataFieldTx1, staking.GasLimitForStakeOperation) + + dataFieldTx2 := fmt.Sprintf("stake@01@%s@%s", blsKeys[1], staking.MockBLSSignature) + tx2 := chainSimulatorIntegrationTests.GenerateTransaction(wallet3.Bytes, 0, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.MinimumStakeValue, dataFieldTx2, staking.GasLimitForStakeOperation) + + dataFieldTx3 := fmt.Sprintf("stake@01@%s@%s", blsKeys[2], staking.MockBLSSignature) + tx3Value := big.NewInt(0).Mul(big.NewInt(2501), chainSimulatorIntegrationTests.OneEGLD) + tx3 := chainSimulatorIntegrationTests.GenerateTransaction(wallet2.Bytes, 0, vm.ValidatorSCAddress, tx3Value, dataFieldTx3, staking.GasLimitForStakeOperation) + + results, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{tx1, tx2, tx3}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 3, len(results)) + require.NotNil(t, results) + + // tx1 should fail + require.Equal(t, "insufficient stake value: expected 2500000000000000000000, got 2499000000000000000000", string(results[0].Logs.Events[0].Topics[1])) + + _ = cs.GenerateBlocks(1) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + if targetEpoch < 2 { + bls1, _ := hex.DecodeString(blsKeys[1]) + bls2, _ := hex.DecodeString(blsKeys[2]) + + blsKeyStatus := staking.GetBLSKeyStatus(t, metachainNode, bls1) + require.Equal(t, nodesStatus, blsKeyStatus) + + blsKeyStatus = staking.GetBLSKeyStatus(t, metachainNode, bls2) + require.Equal(t, nodesStatus, blsKeyStatus) + } else { + // tx2 -- validator should be in queue + staking.CheckValidatorStatus(t, cs, blsKeys[1], nodesStatus) + // tx3 -- validator should be in queue + staking.CheckValidatorStatus(t, cs, blsKeys[2], nodesStatus) + } +} + +// Test auction list api calls during stakingV4 step 2 and onwards. +// Nodes configuration at genesis consisting of a total of 32 nodes, distributed on 3 shards + meta: +// - 4 eligible nodes/shard +// - 4 waiting nodes/shard +// - 2 nodes to shuffle per shard +// - max num nodes config for stakingV4 step3 = 24 (being downsized from previously 32 nodes) +// Steps: +// 1. Stake 1 node and check that in stakingV4 step1 it is unstaked +// 2. Re-stake the node to enter the auction list +// 3. 
From stakingV4 step2 onwards, check that api returns 8 qualified + 1 unqualified nodes +func TestChainSimulator_StakingV4Step2APICalls(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + stakingV4Step1Epoch := uint32(2) + stakingV4Step2Epoch := uint32(3) + stakingV4Step3Epoch := uint32(4) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: uint64(6000), + RoundsPerEpoch: core.OptionalUint64{ + HasValue: true, + Value: 30, + }, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 4, + MetaChainMinNodes: 4, + NumNodesWaitingListMeta: 4, + NumNodesWaitingListShard: 4, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = stakingV4Step1Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = stakingV4Step2Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = stakingV4Step3Epoch + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].MaxNumNodes = 32 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].NodesToShufflePerShard = 2 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = stakingV4Step3Epoch + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].MaxNumNodes = 24 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard = 2 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + + mintValue := big.NewInt(0).Add(chainSimulatorIntegrationTests.MinimumStakeValue, chainSimulatorIntegrationTests.OneEGLD) + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + // Stake a new validator that should end up in auction in step 1 + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.GenerateBlocksUntilEpochIsReached(int32(stakingV4Step1Epoch)) + require.Nil(t, err) + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // In step 1, only the previously staked node should be in auction list + err = cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + require.Empty(t, auctionList) + + // re-stake the node + txDataField = fmt.Sprintf("reStakeUnStakedNodes@%s", blsKeys[0]) + txReStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, big.NewInt(0), txDataField, staking.GasLimitForStakeOperation) + reStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txReStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, reStakeTx) + + err = 
cs.GenerateBlocks(2) + require.Nil(t, err) + + // after the re-stake process, the node should be in auction list + err = cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + auctionList, err = metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + require.Equal(t, []*common.AuctionListValidatorAPIResponse{ + { + Owner: validatorOwner.Bech32, + NumStakedNodes: 1, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: blsKeys[0], + Qualified: true, + }, + }, + }, + }, auctionList) + + // For steps 2,3 and onwards, when making API calls, we'll be using the api nodes config provider to mimic the max number of + // nodes as it will be in step 3. This means we'll see the 8 nodes that were shuffled out from the eligible list, + // plus the additional node that was staked manually. + // Since those 8 shuffled out nodes will be replaced only with another 8 nodes, and the auction list size = 9, + // the outcome should show 8 nodes qualifying and 1 node not qualifying + for epochToSimulate := int32(stakingV4Step2Epoch); epochToSimulate < int32(stakingV4Step3Epoch)+3; epochToSimulate++ { + err = cs.GenerateBlocksUntilEpochIsReached(epochToSimulate) + require.Nil(t, err) + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + qualified, unQualified := getQualifiedAndUnqualifiedNodes(t, metachainNode) + require.Equal(t, 8, len(qualified)) + require.Equal(t, 1, len(unQualified)) + } +} + +func getQualifiedAndUnqualifiedNodes(t *testing.T, metachainNode process.NodeHandler) ([]string, []string) { + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + + qualified := make([]string, 0) + unQualified := make([]string, 0) + + for _, auctionOwnerData := range auctionList { + for _, auctionNode := range auctionOwnerData.Nodes { + if auctionNode.Qualified { + qualified = append(qualified, auctionNode.BlsKey) + } else { + unQualified = append(unQualified, auctionNode.BlsKey) + } + } + } + + return qualified, unQualified +} diff --git a/integrationTests/chainSimulator/staking/stake/stakeAndUnStake_test.go b/integrationTests/chainSimulator/staking/stake/stakeAndUnStake_test.go new file mode 100644 index 00000000000..acb0c7537ed --- /dev/null +++ b/integrationTests/chainSimulator/staking/stake/stakeAndUnStake_test.go @@ -0,0 +1,2626 @@ +package stake + +import ( + "encoding/hex" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator/staking" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/vm" + + 
"github.com/multiversx/mx-chain-core-go/core" + coreAPI "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/require" +) + +const ( + defaultPathToInitialConfig = "../../../../cmd/node/config/" +) + +var log = logger.GetOrCreate("integrationTests/chainSimulator") + +// TODO scenarios +// Make a staking provider with max num of nodes +// DO a merge transaction + +// Test scenario +// 1. Add a new validator private key in the multi key handler +// 2. Do a stake transaction for the validator key +// 3. Do an unstake transaction (to make a place for the new validator) +// 4. Check if the new validator has generated rewards +func TestChainSimulator_AddValidatorKey(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 8 // 8 nodes until new nodes will be placed on queue + configs.SetMaxNumberOfNodesInConfigs(cfg, uint32(newNumNodes), 0, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocks(30) + require.Nil(t, err) + + // Step 1 --- add a new validator key in the chain simulator + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + + newValidatorOwner := "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl" + newValidatorOwnerBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(newValidatorOwner) + rcv := "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqplllst77y4l" + rcvAddrBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(rcv) + + // Step 2 --- set an initial balance for the address that will initialize all the transactions + err = cs.SetStateMultiple([]*dtos.AddressState{ + { + Address: "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl", + Balance: "10000000000000000000000", + }, + }) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + // Step 3 --- generate and send a stake transaction with the BLS key of the validator key that was added at step 1 + stakeValue, _ := big.NewInt(0).SetString("2500000000000000000000", 10) + tx := &transaction.Transaction{ + Nonce: 0, + Value: stakeValue, + SndAddr: newValidatorOwnerBytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("stake@01@%s@010101", blsKeys[0])), + GasLimit: 50_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: 
[]byte(configs.ChainID), + Version: 1, + } + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(newValidatorOwnerBytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(newValidatorOwner, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeActiveValidator := accountValidatorOwner.Balance + + // Step 5 --- create an unStake transaction with the bls key of an initial validator and execute the transaction to make place for the validator that was added at step 3 + firstValidatorKey, err := cs.GetValidatorPrivateKeys()[0].GeneratePublic().ToByteArray() + require.Nil(t, err) + + initialAddressWithValidators := cs.GetInitialWalletKeys().StakeWallets[0].Address + shardID := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(initialAddressWithValidators.Bytes) + initialAccount, _, err := cs.GetNodeHandler(shardID).GetFacadeHandler().GetAccount(initialAddressWithValidators.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + tx = &transaction.Transaction{ + Nonce: initialAccount.Nonce, + Value: big.NewInt(0), + SndAddr: initialAddressWithValidators.Bytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("unStake@%s", hex.EncodeToString(firstValidatorKey))), + GasLimit: 50_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + + // Step 6 --- generate 8 epochs to get rewards + err = cs.GenerateBlocksUntilEpochIsReached(8) + require.Nil(t, err) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + validatorStatistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + checkValidatorsRating(t, validatorStatistics) + + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(newValidatorOwner, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterActiveValidator := accountValidatorOwner.Balance + + log.Info("balance before validator", "value", balanceBeforeActiveValidator) + log.Info("balance after validator", "value", balanceAfterActiveValidator) + + balanceBeforeBig, _ := big.NewInt(0).SetString(balanceBeforeActiveValidator, 10) + balanceAfterBig, _ := big.NewInt(0).SetString(balanceAfterActiveValidator, 10) + diff := balanceAfterBig.Sub(balanceAfterBig, balanceBeforeBig) + log.Info("difference", "value", diff.String()) + + // Step 7 --- check the balance of the validator owner has been increased + require.True(t, diff.Cmp(big.NewInt(0)) > 0) +} + +func TestChainSimulator_AddANewValidatorAfterStakingV4(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: 
roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 100, + MetaChainMinNodes: 100, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + cfg.GeneralConfig.ValidatorStatistics.CacheRefreshIntervalInSec = 1 + eligibleNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + // 8 nodes until new nodes will be placed on queue + waitingNodes := uint32(8) + configs.SetMaxNumberOfNodesInConfigs(cfg, uint32(eligibleNodes), waitingNodes, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocks(150) + require.Nil(t, err) + + // Step 1 --- add a new validator key in the chain simulator + numOfNodes := 20 + validatorSecretKeysBytes, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(numOfNodes) + require.Nil(t, err) + err = cs.AddValidatorKeys(validatorSecretKeysBytes) + require.Nil(t, err) + + newValidatorOwner := "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl" + newValidatorOwnerBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(newValidatorOwner) + rcv := "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqplllst77y4l" + rcvAddrBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(rcv) + + // Step 2 --- set an initial balance for the address that will initialize all the transactions + err = cs.SetStateMultiple([]*dtos.AddressState{ + { + Address: "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl", + Balance: "1000000000000000000000000", + }, + }) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + // Step 3 --- generate and send a stake transaction with the BLS keys of the validators key that were added at step 1 + validatorData := "" + for _, blsKey := range blsKeys { + validatorData += fmt.Sprintf("@%s@010101", blsKey) + } + + numOfNodesHex := hex.EncodeToString(big.NewInt(int64(numOfNodes)).Bytes()) + stakeValue, _ := big.NewInt(0).SetString("51000000000000000000000", 10) + tx := &transaction.Transaction{ + Nonce: 0, + Value: stakeValue, + SndAddr: newValidatorOwnerBytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("stake@%s%s", numOfNodesHex, validatorData)), + GasLimit: 500_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + + txFromNetwork, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txFromNetwork) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + results, err := metachainNode.GetFacadeHandler().AuctionListApi() + require.Nil(t, err) + require.Equal(t, newValidatorOwner, results[0].Owner) + require.Equal(t, 20, len(results[0].Nodes)) + checkTotalQualified(t, results, 8) + + err = cs.GenerateBlocks(100) + require.Nil(t, err) + + results, err = cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().AuctionListApi() + require.Nil(t, err) + checkTotalQualified(t, results, 0) +} + +// Internal test scenario #4 #5 #6 +// do stake +// do unStake +// do unBondNodes +// do unBondTokens +func TestChainSimulatorStakeUnStakeUnBond(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 1) + 
}) + + t.Run("staking ph 4 step 1 active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 4) + }) + + t.Run("staking ph 4 step 2 active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 5) + }) + + t.Run("staking ph 4 step 3 active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 6) + }) +} + +func testStakeUnStakeUnBond(t *testing.T, targetEpoch int32) { + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriod = 1 + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 1 + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 10 + configs.SetMaxNumberOfNodesInConfigs(cfg, uint32(newNumNodes), 0, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + + mintValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(2600)) + walletAddressShardID := uint32(0) + walletAddress, err := cs.GenerateAndMintWalletAddress(walletAddressShardID, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(walletAddress.Bytes, 0, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + bls0, _ := hex.DecodeString(blsKeys[0]) + blsKeyStatus := staking.GetBLSKeyStatus(t, metachainNode, bls0) + require.Equal(t, "staked", blsKeyStatus) + + // do unStake + txUnStake := chainSimulatorIntegrationTests.GenerateTransaction(walletAddress.Bytes, 1, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, fmt.Sprintf("unStake@%s", blsKeys[0]), staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + blsKeyStatus = staking.GetBLSKeyStatus(t, metachainNode, bls0) + require.Equal(t, "unStaked", blsKeyStatus) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + // do unBond + txUnBond := chainSimulatorIntegrationTests.GenerateTransaction(walletAddress.Bytes, 2, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, fmt.Sprintf("unBondNodes@%s", blsKeys[0]), staking.GasLimitForStakeOperation) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, 
staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + // do claim + txClaim := chainSimulatorIntegrationTests.GenerateTransaction(walletAddress.Bytes, 3, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, "unBondTokens", staking.GasLimitForStakeOperation) + claimTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txClaim, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, claimTx) + + err = cs.GenerateBlocks(5) + require.Nil(t, err) + + // check tokens are in the wallet balance + walletAccount, _, err := cs.GetNodeHandler(walletAddressShardID).GetFacadeHandler().GetAccount(walletAddress.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + walletBalanceBig, _ := big.NewInt(0).SetString(walletAccount.Balance, 10) + require.True(t, walletBalanceBig.Cmp(chainSimulatorIntegrationTests.MinimumStakeValue) > 0) +} + +func checkTotalQualified(t *testing.T, auctionList []*common.AuctionListValidatorAPIResponse, expected int) { + totalQualified := 0 + for _, res := range auctionList { + for _, node := range res.Nodes { + if node.Qualified { + totalQualified++ + } + } + } + require.Equal(t, expected, totalQualified) +} + +func checkValidatorsRating(t *testing.T, validatorStatistics map[string]*validator.ValidatorStatistics) { + countRatingIncreased := 0 + for _, validatorInfo := range validatorStatistics { + validatorSignedAtLeastOneBlock := validatorInfo.NumValidatorSuccess > 0 || validatorInfo.NumLeaderSuccess > 0 + if !validatorSignedAtLeastOneBlock { + continue + } + countRatingIncreased++ + require.Greater(t, validatorInfo.TempRating, validatorInfo.Rating) + } + require.Greater(t, countRatingIncreased, 0) +} + +// Test description +// Stake funds - happy flow +// +// Preconditions: have an account with egld and 2 staked nodes (2500 stake per node) - directly staked, and no unstake +// +// 1. Check the stake amount for the owner of the staked nodes with the vmquery "getTotalStaked", and the account current EGLD balance +// 2. Create from the owner of staked nodes a transaction to stake 1 EGLD and send it to the network +// 3. 
Check the outcome of the TX & verify new stake state with vmquery + +// Internal test scenario #24 +func TestChainSimulator_DirectStakingNodes_StakeFunds(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + 
PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedNodesStakingFunds(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Preconditions. Have an account with 2 staked nodes") + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(5010) + mintValue = mintValue.Mul(chainSimulatorIntegrationTests.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Set(chainSimulatorIntegrationTests.MinimumStakeValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + stakeValue = big.NewInt(0).Set(chainSimulatorIntegrationTests.MinimumStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], staking.MockBLSSignature) + txStake = chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + log.Info("Step 1. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 2. 
Create from the owner of the staked nodes a tx to stake 1 EGLD") + + stakeValue = big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(1)) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake = chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + log.Info("Step 3. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5001) +} + +func checkExpectedStakedValue(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte, expectedValue int64) { + totalStaked := getTotalStaked(t, metachainNode, blsKey) + + expectedStaked := big.NewInt(expectedValue) + expectedStaked = expectedStaked.Mul(chainSimulatorIntegrationTests.OneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(totalStaked)) +} + +func getTotalStaked(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) []byte { + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) + + return result.ReturnData[0] +} + +// Test description: +// Unstake funds with deactivation of node if below 2500 -> the rest of funds are distributed as topup at epoch change +// +// Internal test scenario #26 +func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Check the stake amount and number of nodes for the owner of the staked nodes with the vmquery "getTotalStaked", and the account current EGLD balance + // 2. Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network + // 3. Check the outcome of the TX & verify new stake state with vmquery "getTotalStaked" and "getUnStakedTokensList" + // 4. 
Wait for change of epoch and check the outcome + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + 
BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(5010) + mintValue = mintValue.Mul(chainSimulatorIntegrationTests.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Set(chainSimulatorIntegrationTests.MinimumStakeValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + stakeValue = big.NewInt(0).Set(chainSimulatorIntegrationTests.MinimumStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], staking.MockBLSSignature) + txStake = chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[1]) + + log.Info("Step 1. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 2. 
Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network") + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(chainSimulatorIntegrationTests.OneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 3. Check the outcome of the TX & verify new stake state with vmquery getTotalStaked and getUnStakedTokensList") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 4990) + + unStakedTokensAmount := getUnStakedTokensList(t, metachainNode, validatorOwner.Bytes) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(chainSimulatorIntegrationTests.OneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(unStakedTokensAmount).String()) + + log.Info("Step 4. Wait for change of epoch and check the outcome") + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + checkOneOfTheNodesIsUnstaked(t, metachainNode, blsKeys[:2]) +} + +func getUnStakedTokensList(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) []byte { + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) + + return result.ReturnData[0] +} + +func checkOneOfTheNodesIsUnstaked(t *testing.T, + metachainNode chainSimulatorProcess.NodeHandler, + blsKeys []string, +) { + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + keyStatus0 := staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + log.Info("Key info", "key", blsKeys[0], "status", keyStatus0) + + isNotStaked0 := keyStatus0 == staking.UnStakedStatus + + decodedBLSKey1, _ := hex.DecodeString(blsKeys[1]) + keyStatus1 := staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey1) + log.Info("Key info", "key", blsKeys[1], "status", keyStatus1) + + isNotStaked1 := keyStatus1 == staking.UnStakedStatus + + require.True(t, isNotStaked0 != isNotStaked1) +} + +func testBLSKeyStaked(t *testing.T, + metachainNode chainSimulatorProcess.NodeHandler, + blsKey string, +) { + decodedBLSKey, _ := hex.DecodeString(blsKey) + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + + validatorStatistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + + activationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if activationEpoch <= metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + require.Equal(t, staking.StakedStatus, staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey)) + return + } + + // in staking ph 2/3.5 we do not find the bls key on the validator statistics + _, found := validatorStatistics[blsKey] + 
require.False(t, found) + require.Equal(t, staking.QueuedStatus, staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey)) +} + +// Test description: +// Unstake funds with deactivation of node, followed by stake with sufficient amount does not unstake node at end of epoch +// +// Internal test scenario #27 +func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation_WithReactivation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Check the stake amount and number of nodes for the owner of the staked nodes with the vmquery "getTotalStaked", and the account current EGLD balance + // 2. Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network + // 3. Check the outcome of the TX & verify new stake state with vmquery + // 4. Create from the owner of staked nodes a transaction to stake 10 EGLD and send it to the network + // 5. Check the outcome of the TX & verify new stake state with vmquery + // 6. Wait for change of epoch and check the outcome + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := 
chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(6000) + mintValue = mintValue.Mul(chainSimulatorIntegrationTests.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Set(chainSimulatorIntegrationTests.MinimumStakeValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block 
that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + stakeValue = big.NewInt(0).Set(chainSimulatorIntegrationTests.MinimumStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], staking.MockBLSSignature) + txStake = chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[1]) + + log.Info("Step 1. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 2. Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network") + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(chainSimulatorIntegrationTests.OneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 3. Check the outcome of the TX & verify new stake state with vmquery getTotalStaked and getUnStakedTokensList") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 4990) + + unStakedTokensAmount := getUnStakedTokensList(t, metachainNode, validatorOwner.Bytes) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(chainSimulatorIntegrationTests.OneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(unStakedTokensAmount).String()) + + log.Info("Step 4. Create from the owner of staked nodes a transaction to stake 10 EGLD and send it to the network") + + newStakeValue := big.NewInt(10) + newStakeValue = newStakeValue.Mul(chainSimulatorIntegrationTests.OneEGLD, newStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake = chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 3, vm.ValidatorSCAddress, newStakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("5. Check the outcome of the TX & verify new stake state with vmquery") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 6. 
Wait for change of epoch and check the outcome") + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + testBLSKeyStaked(t, metachainNode, blsKeys[1]) +} + +// Test description: +// Withdraw unstaked funds before unbonding period should return error +// +// Internal test scenario #28 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedFundsBeforeUnbonding(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Create from the owner of staked nodes a transaction to withdraw the unstaked funds + // 2. Check the outcome of the TX & verify new stake state with vmquery ("getUnStakedTokensList") + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + 
cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(10000) + mintValue = mintValue.Mul(chainSimulatorIntegrationTests.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + log.Info("Step 1. 
Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(chainSimulatorIntegrationTests.OneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // check bls key is still staked + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 2. Check the outcome of the TX & verify new stake state with vmquery (`getUnStakedTokensList`)") + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(chainSimulatorIntegrationTests.OneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + // the owner balance should decrease only with the txs fee + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + txsFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + balanceAfterUnbondingWithFee := big.NewInt(0).Add(balanceAfterUnbonding, txsFee) + + txsFee, _ = big.NewInt(0).SetString(unStakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + txsFee, _ = big.NewInt(0).SetString(stakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + require.Equal(t, 1, balanceAfterUnbondingWithFee.Cmp(balanceBeforeUnbonding)) +} + +// Test description: +// Withdraw unstaked funds in first available withdraw epoch +// +// Internal test scenario #29 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedInWithdrawEpoch(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Wait for the unbonding epoch to start + // 2. Create from the owner of staked nodes a transaction to withdraw the unstaked funds + // 3. 
Check the outcome of the TX & verify new stake state with vmquery ("getUnStakedTokensList") + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: 
api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(10000) + mintValue = mintValue.Mul(chainSimulatorIntegrationTests.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(chainSimulatorIntegrationTests.OneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // check bls key is still staked + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) 
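+ // the 10 EGLD unstaked above should now be reported in the unStaked tokens list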
+ + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(chainSimulatorIntegrationTests.OneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + log.Info("Step 1. Wait for the unbonding epoch to start") + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + log.Info("Step 2. Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 3. Check the outcome of the TX & verify new stake state with vmquery (`getUnStakedTokensList`)") + + scQuery = &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err = metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) + + expectedStaked := big.NewInt(2590) + expectedStaked = expectedStaked.Mul(chainSimulatorIntegrationTests.OneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(result.ReturnData[0])) + + // the owner balance should increase by (10 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + // subtract unbonding value + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue) + + txsFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + balanceAfterUnbondingWithFee := big.NewInt(0).Add(balanceAfterUnbonding, txsFee) + + txsFee, _ = big.NewInt(0).SetString(unStakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + txsFee, _ = big.NewInt(0).SetString(stakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + require.Equal(t, 1, balanceAfterUnbondingWithFee.Cmp(balanceBeforeUnbonding)) +} + +// Test description: +// Unstaking funds in different batches allows correct withdrawal for each batch +// at the corresponding epoch. +// +// Internal test scenario #30 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedInBatches(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Create 3 transactions for unstaking: first one unstaking 11 egld, second one unstaking 12 egld and third one unstaking 13 egld. + // 2. Send the transactions in consecutive epochs, one TX in each epoch. + // 3. Wait for the epoch when first tx unbonding period ends. + // 4. Create a transaction for withdraw and send it to the network + // 5. Wait for an epoch + // 6. Create another transaction for withdraw and send it to the network + // 7.
Wait for an epoch + // 8. Create another transaction for withdraw and send it to the network + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + 
PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(2700) + mintValue = mintValue.Mul(chainSimulatorIntegrationTests.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + stakeTxFee, _ := big.NewInt(0).SetString(stakeTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + log.Info("Step 1. Create 3 transactions for unstaking: first one unstaking 11 egld, second one unstaking 12 egld and third one unstaking 13 egld.") + log.Info("Step 2. 
Send the transactions in consecutive epochs, one TX in each epoch.") + + unStakeValue1 := big.NewInt(11) + unStakeValue1 = unStakeValue1.Mul(chainSimulatorIntegrationTests.OneEGLD, unStakeValue1) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue1.Bytes())) + txUnStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + unStakeTxFee, _ := big.NewInt(0).SetString(unStakeTx.Fee, 10) + + testEpoch := targetEpoch + 1 + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + unStakeValue2 := big.NewInt(12) + unStakeValue2 = unStakeValue2.Mul(chainSimulatorIntegrationTests.OneEGLD, unStakeValue2) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue2.Bytes())) + txUnStake = chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + unStakeValue3 := big.NewInt(13) + unStakeValue3 = unStakeValue3.Mul(chainSimulatorIntegrationTests.OneEGLD, unStakeValue3) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue3.Bytes())) + txUnStake = chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 3, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + // check bls key is still staked + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(11) + expectedUnStaked = expectedUnStaked.Mul(chainSimulatorIntegrationTests.OneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + scQuery = &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err = metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) + + expectedStaked := big.NewInt(2600 - 11 - 12 - 13) + expectedStaked = expectedStaked.Mul(chainSimulatorIntegrationTests.OneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), 
string(result.ReturnData[0])) + + log.Info("Step 3. Wait for the unbonding epoch to start") + + testEpoch += 3 + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + log.Info("Step 4.1. Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 4, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + unBondTxFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + + txsFee := big.NewInt(0) + + txsFee.Add(txsFee, stakeTxFee) + txsFee.Add(txsFee, unBondTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) + + log.Info("Step 4.2. Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond = chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 5, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForUnBond) + unBondTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11+12 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ = big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue2) + + txsFee.Add(txsFee, unBondTxFee) + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) + + log.Info("Step 4.3. 
Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond = chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 6, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForUnBond) + unBondTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11+12+13 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ = big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue2) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue3) + + txsFee.Add(txsFee, unBondTxFee) + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) +} + +// Test description: +// Unstake funds in different batches in the same epoch allows correct withdrawal in the correct epoch +// +// Internal test scenario #31 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedInEpoch(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Create 3 transactions for unstaking: first one unstaking 11 egld each, second one unstaking 12 egld and third one unstaking 13 egld. + // 2. Send the transactions consecutively in the same epoch + // 3. Wait for the epoch when unbonding period ends. + // 4. 
Create a transaction for withdraw and send it to the network + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + 
NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(2700) + mintValue = mintValue.Mul(chainSimulatorIntegrationTests.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + stakeTxFee, _ := big.NewInt(0).SetString(stakeTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + log.Info("Step 1. Create 3 transactions for unstaking: first one unstaking 11 egld each, second one unstaking 12 egld and third one unstaking 13 egld.") + log.Info("Step 2. 
Send the transactions in consecutively in same epoch.") + + unStakeValue1 := big.NewInt(11) + unStakeValue1 = unStakeValue1.Mul(chainSimulatorIntegrationTests.OneEGLD, unStakeValue1) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue1.Bytes())) + txUnStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + unStakeTxFee, _ := big.NewInt(0).SetString(unStakeTx.Fee, 10) + + unStakeValue2 := big.NewInt(12) + unStakeValue2 = unStakeValue2.Mul(chainSimulatorIntegrationTests.OneEGLD, unStakeValue2) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue2.Bytes())) + txUnStake = chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + unStakeValue3 := big.NewInt(13) + unStakeValue3 = unStakeValue3.Mul(chainSimulatorIntegrationTests.OneEGLD, unStakeValue3) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue3.Bytes())) + txUnStake = chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 3, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + // check bls key is still staked + testBLSKeyStaked(t, metachainNode, blsKeys[0]) + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(11 + 12 + 13) + expectedUnStaked = expectedUnStaked.Mul(chainSimulatorIntegrationTests.OneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + scQuery = &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err = metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) + + expectedStaked := big.NewInt(2600 - 11 - 12 - 13) + expectedStaked = expectedStaked.Mul(chainSimulatorIntegrationTests.OneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(result.ReturnData[0])) + + log.Info("Step 3. Wait for the unbonding epoch to start") + + testEpoch := targetEpoch + 3 + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + log.Info("Step 4.1. 
Create from the owner of staked nodes a transaction to withdraw the unstaked funds")
+
+	txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0])
+	txUnBond := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 4, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, staking.GasLimitForUnBond)
+	unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, staking.MaxNumOfBlockToGenerateWhenExecutingTx)
+	require.Nil(t, err)
+	require.NotNil(t, unBondTx)
+
+	unBondTxFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10)
+
+	err = cs.GenerateBlocks(2)
+	require.Nil(t, err)
+
+	// the owner balance should increase by the unbonded amount (11+12+13 EGLD), minus the tx fees
+	accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{})
+	require.Nil(t, err)
+	balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10)
+
+	balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1)
+	balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue2)
+	balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue3)
+
+	txsFee := big.NewInt(0)
+
+	txsFee.Add(txsFee, stakeTxFee)
+	txsFee.Add(txsFee, unBondTxFee)
+	txsFee.Add(txsFee, unStakeTxFee)
+	txsFee.Add(txsFee, unStakeTxFee)
+	txsFee.Add(txsFee, unStakeTxFee)
+
+	balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee)
+
+	require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding))
+}
+
+// Test that if we unStake one active node (waiting/eligible), the number of qualified nodes will remain the same.
+// Nodes configuration at genesis consisting of a total of 32 nodes, distributed on 3 shards + meta:
+// - 4 eligible nodes/shard
+// - 4 waiting nodes/shard
+// - 2 nodes to shuffle per shard
+// - max num nodes config for stakingV4 step3 = 24 (being downsized from previously 32 nodes)
+// - with this config, we should always select 8 nodes from the auction list
+// We will add one extra node, so the auction list size = 9, but we will always select 8. Even if we unStake one active node,
+// we should still only select 8 nodes.
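+//
+// Worked numbers for the configuration above (a clarifying sketch of the arithmetic, based on the config values
+// and the assertions in this test; it adds no extra test logic):
+//   eligible + waiting at genesis:              (4 + 4) * 4 shards (3 + meta) = 32 nodes
+//   shuffled out to auction each epoch:         2 per shard * 4 shards        = 8 nodes
+//   max nodes at stakingV4 step 3:              24 nodes
+//   kept in eligible + waiting after shuffling: 24 - 8  = 16 nodes
+//   selected back from the auction list:        24 - 16 = 8 nodes
+// So the auction list normally holds exactly the 8 shuffled-out nodes (all qualified); staking one extra node
+// makes 9 candidates, of which still only 8 qualify.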
+func TestChainSimulator_UnStakeOneActiveNodeAndCheckAPIAuctionList(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + stakingV4Step1Epoch := uint32(2) + stakingV4Step2Epoch := uint32(3) + stakingV4Step3Epoch := uint32(4) + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 4, + MetaChainMinNodes: 4, + NumNodesWaitingListMeta: 4, + NumNodesWaitingListShard: 4, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = stakingV4Step1Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = stakingV4Step2Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = stakingV4Step3Epoch + cfg.EpochConfig.EnableEpochs.CleanupAuctionOnLowWaitingListEnableEpoch = stakingV4Step1Epoch + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].MaxNumNodes = 32 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].NodesToShufflePerShard = 2 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = stakingV4Step3Epoch + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].MaxNumNodes = 24 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard = 2 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocksUntilEpochIsReached(int32(stakingV4Step3Epoch + 1)) + require.Nil(t, err) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + qualified, unQualified := getQualifiedAndUnqualifiedNodes(t, metachainNode) + require.Equal(t, 8, len(qualified)) + require.Equal(t, 0, len(unQualified)) + + stakeOneNode(t, cs) + + qualified, unQualified = getQualifiedAndUnqualifiedNodes(t, metachainNode) + require.Equal(t, 8, len(qualified)) + require.Equal(t, 1, len(unQualified)) + + unStakeOneActiveNode(t, cs) + + qualified, unQualified = getQualifiedAndUnqualifiedNodes(t, metachainNode) + require.Equal(t, 8, len(qualified)) + require.Equal(t, 1, len(unQualified)) +} + +// Nodes configuration at genesis consisting of a total of 40 nodes, distributed on 3 shards + meta: +// - 4 eligible nodes/shard +// - 4 waiting nodes/shard +// - 2 nodes to shuffle per shard +// - max num nodes config for stakingV4 step3 = 32 (being downsized from previously 40 nodes) +// - with this config, we should always select max 8 nodes from auction list if there are > 40 nodes in the network +// This test will run with only 32 nodes and check that there are no nodes in the auction list, +// because of the lowWaitingList condition being triggered when in staking v4 +func TestChainSimulator_EdgeCaseLowWaitingList(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + stakingV4Step1Epoch := uint32(2) + stakingV4Step2Epoch := uint32(3) + stakingV4Step3Epoch := uint32(4) + + numOfShards := uint32(3) + cs, err := 
chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 4, + MetaChainMinNodes: 4, + NumNodesWaitingListMeta: 2, + NumNodesWaitingListShard: 2, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = stakingV4Step1Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = stakingV4Step2Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = stakingV4Step3Epoch + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].MaxNumNodes = 40 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].NodesToShufflePerShard = 2 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = stakingV4Step3Epoch + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].MaxNumNodes = 32 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard = 2 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + epochToCheck := int32(stakingV4Step3Epoch + 1) + err = cs.GenerateBlocksUntilEpochIsReached(epochToCheck) + require.Nil(t, err) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + qualified, unQualified := getQualifiedAndUnqualifiedNodes(t, metachainNode) + require.Equal(t, 0, len(qualified)) + require.Equal(t, 0, len(unQualified)) + + // we always have 0 in auction list because of the lowWaitingList condition + epochToCheck += 1 + err = cs.GenerateBlocksUntilEpochIsReached(epochToCheck) + require.Nil(t, err) + + qualified, unQualified = getQualifiedAndUnqualifiedNodes(t, metachainNode) + require.Equal(t, 0, len(qualified)) + require.Equal(t, 0, len(unQualified)) + + // stake 16 mode nodes, these will go to auction list + stakeNodes(t, cs, 17) + + epochToCheck += 1 + err = cs.GenerateBlocksUntilEpochIsReached(epochToCheck) + require.Nil(t, err) + + qualified, unQualified = getQualifiedAndUnqualifiedNodes(t, metachainNode) + // all the previously registered will be selected, as we have 24 nodes in eligible+waiting, 8 will shuffle out, + // but this time there will be not be lowWaitingList, as there are enough in auction, so we will end up with + // 24-8 = 16 nodes remaining + 16 from auction, to fill up all 32 positions + require.Equal(t, 16, len(qualified)) + require.Equal(t, 1, len(unQualified)) + + shuffledOutNodesKeys, err := metachainNode.GetProcessComponents().NodesCoordinator().GetShuffledOutToAuctionValidatorsPublicKeys(uint32(epochToCheck)) + require.Nil(t, err) + + checkKeysNotInMap(t, shuffledOutNodesKeys, qualified) + checkKeysNotInMap(t, shuffledOutNodesKeys, unQualified) +} + +func checkKeysNotInMap(t *testing.T, m map[uint32][][]byte, keys []string) { + for _, key := range keys { + for _, v := range m { + for _, k := range v { + mapKey := hex.EncodeToString(k) + require.NotEqual(t, key, mapKey) + } + } + } +} + +func stakeNodes(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, numNodesToStake int) { + txs := make([]*transaction.Transaction, numNodesToStake) + for i := 0; i < numNodesToStake; i++ { + txs[i] = createStakeTransaction(t, cs) + } + + stakeTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted(txs, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTxs) + 
require.Len(t, stakeTxs, numNodesToStake) + + require.Nil(t, cs.GenerateBlocks(1)) +} + +func stakeOneNode(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator) { + txStake := createStakeTransaction(t, cs) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + require.Nil(t, cs.GenerateBlocks(1)) +} + +func createStakeTransaction(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator) *transaction.Transaction { + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + + mintValue := big.NewInt(0).Add(chainSimulatorIntegrationTests.MinimumStakeValue, chainSimulatorIntegrationTests.OneEGLD) + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + return chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, chainSimulatorIntegrationTests.MinimumStakeValue, txDataField, staking.GasLimitForStakeOperation) +} + +func unStakeOneActiveNode(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator) { + err := cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + + validators, err := cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + + idx := 0 + keyToUnStake := make([]byte, 0) + numKeys := len(cs.GetValidatorPrivateKeys()) + for idx = 0; idx < numKeys; idx++ { + keyToUnStake, err = cs.GetValidatorPrivateKeys()[idx].GeneratePublic().ToByteArray() + require.Nil(t, err) + + apiValidator, found := validators[hex.EncodeToString(keyToUnStake)] + require.True(t, found) + + validatorStatus := apiValidator.ValidatorStatus + if validatorStatus == "waiting" || validatorStatus == "eligible" { + log.Info("found active key to unStake", "index", idx, "bls key", keyToUnStake, "list", validatorStatus) + break + } + + if idx == numKeys-1 { + require.Fail(t, "did not find key to unStake") + } + } + + rcv := "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqplllst77y4l" + rcvAddrBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(rcv) + + validatorWallet := cs.GetInitialWalletKeys().StakeWallets[idx].Address + shardID := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorWallet.Bytes) + initialAccount, _, err := cs.GetNodeHandler(shardID).GetFacadeHandler().GetAccount(validatorWallet.Bech32, coreAPI.AccountQueryOptions{}) + + require.Nil(t, err) + tx := &transaction.Transaction{ + Nonce: initialAccount.Nonce, + Value: big.NewInt(0), + SndAddr: validatorWallet.Bytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("unStake@%s", hex.EncodeToString(keyToUnStake))), + GasLimit: 50_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + err = cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + validators, err = cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + + apiValidator, found := validators[hex.EncodeToString(keyToUnStake)] + 
require.True(t, found) + require.True(t, strings.Contains(apiValidator.ValidatorStatus, "leaving")) +} diff --git a/integrationTests/chainSimulator/staking/stakingProvider/delegation_test.go b/integrationTests/chainSimulator/staking/stakingProvider/delegation_test.go new file mode 100644 index 00000000000..423faa3fbab --- /dev/null +++ b/integrationTests/chainSimulator/staking/stakingProvider/delegation_test.go @@ -0,0 +1,2073 @@ +package stakingProvider + +import ( + "encoding/hex" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator/staking" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/vm" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" + dataVm "github.com/multiversx/mx-chain-core-go/data/vm" + "github.com/multiversx/mx-chain-crypto-go/signing" + "github.com/multiversx/mx-chain-crypto-go/signing/mcl" + mclsig "github.com/multiversx/mx-chain-crypto-go/signing/mcl/singlesig" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var log = logger.GetOrCreate("stakingProvider") + +const gasLimitForConvertOperation = 510_000_000 +const gasLimitForDelegationContractCreationOperation = 500_000_000 +const gasLimitForAddNodesOperation = 500_000_000 +const gasLimitForUndelegateOperation = 500_000_000 +const gasLimitForMergeOperation = 600_000_000 +const gasLimitForDelegate = 12_000_000 + +const maxCap = "00" // no cap +const hexServiceFee = "0ea1" // 37.45% + +// Test description: +// Test that delegation contract created with MakeNewContractFromValidatorData works properly +// Also check that delegate and undelegate works properly and the top-up remain the same if every delegator undelegates. +// Test that the top-up from normal stake will be transferred after creating the contract and will be used in auction list computing + +// Internal test scenario #10 +func TestChainSimulator_MakeNewContractFromValidatorData(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test scenario done in staking 3.5 phase (staking v4 is not active) + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on queue and topup is 500 + // 4. 
Execute the MakeNewContractFromValidatorData transaction and test that the key is on queue and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + maxNodesChangeEnableEpoch := cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch + blsMultiSignerEnableEpoch := cfg.EpochConfig.EnableEpochs.BLSMultiSignerEnableEpoch + + cfg.EpochConfig.EnableEpochs = config.EnableEpochs{} + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch = maxNodesChangeEnableEpoch + cfg.EpochConfig.EnableEpochs.BLSMultiSignerEnableEpoch = blsMultiSignerEnableEpoch + + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 1) + }) + + // Test scenario done in staking 3.5 phase (staking v4 is not active) + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on queue and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on queue and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. 
Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 is not active and all is done in epoch 0", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + maxNodesChangeEnableEpoch := cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch + blsMultiSignerEnableEpoch := cfg.EpochConfig.EnableEpochs.BLSMultiSignerEnableEpoch + + // set all activation epoch values on 0 + cfg.EpochConfig.EnableEpochs = config.EnableEpochs{} + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch = maxNodesChangeEnableEpoch + cfg.EpochConfig.EnableEpochs.BLSMultiSignerEnableEpoch = blsMultiSignerEnableEpoch + + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + // we need a little time to enable the VM queries on the http server + time.Sleep(time.Second) + // also, propose a couple of blocks + err = cs.GenerateBlocks(3) + require.Nil(t, err) + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 0) + }) + + // Test scenario done in staking v4 phase step 1 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on auction list and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on auction list and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 2) + }) + + // Test scenario done in staking v4 phase step 2 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. 
Do a stake transaction for the validator key and test that the new key is on auction list and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on auction list and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 3) + }) + + // Test scenario done in staking v4 phase step 3 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on auction list and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on auction list and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 4) + }) +} + +func testChainSimulatorMakeNewContractFromValidatorData(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Step 1. Add a new validator private key in the multi key handler") + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + log.Info("Step 2. 
Set the initial state for the owner and the 2 delegators") + mintValue := big.NewInt(3010) + mintValue = mintValue.Mul(chainSimulatorIntegrationTests.OneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + delegator1, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + delegator2, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + log.Info("working with the following addresses", + "newValidatorOwner", validatorOwner.Bech32, "delegator1", delegator1.Bech32, "delegator2", delegator2.Bech32) + + log.Info("Step 3. Do a stake transaction for the validator key and test that the new key is on queue / auction list and the correct topup") + stakeValue := big.NewInt(0).Set(chainSimulatorIntegrationTests.MinimumStakeValue) + addedStakedValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(500)) + stakeValue.Add(stakeValue, addedStakedValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorOwner.Bytes, blsKeys[0], addedStakedValue, 1) + + log.Info("Step 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on queue / auction list and the correct topup") + txDataField = fmt.Sprintf("makeNewContractFromValidatorData@%s@%s", maxCap, hexServiceFee) + txConvert := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 1, vm.DelegationManagerSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, gasLimitForConvertOperation) + convertTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txConvert, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, convertTx) + + delegationAddress := convertTx.Logs.Events[0].Topics[1] + delegationAddressBech32 := metachainNode.GetCoreComponents().AddressPubKeyConverter().SilentEncode(delegationAddress, log) + log.Info("generated delegation address", "address", delegationAddressBech32) + + err = cs.ForceResetValidatorStatisticsCache() + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], addedStakedValue, 1) + + log.Info("Step 5. 
Execute 2 delegation operations of 100 EGLD each, check the topup is 700") + delegateValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(100)) + txDelegate1 := chainSimulatorIntegrationTests.GenerateTransaction(delegator1.Bytes, 0, delegationAddress, delegateValue, "delegate", gasLimitForDelegate) + delegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate1, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate1Tx) + + txDelegate2 := chainSimulatorIntegrationTests.GenerateTransaction(delegator2.Bytes, 0, delegationAddress, delegateValue, "delegate", gasLimitForDelegate) + delegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate2, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate2Tx) + + expectedTopUp := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(700)) + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], expectedTopUp, 1) + + log.Info("6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500") + unDelegateValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(100)) + txDataField = fmt.Sprintf("unDelegate@%s", hex.EncodeToString(unDelegateValue.Bytes())) + txUnDelegate1 := chainSimulatorIntegrationTests.GenerateTransaction(delegator1.Bytes, 1, delegationAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, gasLimitForDelegate) + unDelegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnDelegate1, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unDelegate1Tx) + + txDataField = fmt.Sprintf("unDelegate@%s", hex.EncodeToString(unDelegateValue.Bytes())) + txUnDelegate2 := chainSimulatorIntegrationTests.GenerateTransaction(delegator2.Bytes, 1, delegationAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, gasLimitForDelegate) + unDelegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnDelegate2, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unDelegate2Tx) + + expectedTopUp = big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(500)) + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], expectedTopUp, 1) +} + +func testBLSKeyIsInQueueOrAuction(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte, blsKey string, expectedTopUp *big.Int, actionListSize int) { + decodedBLSKey, _ := hex.DecodeString(blsKey) + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + statistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + require.Equal(t, expectedTopUp.String(), getBLSTopUpValue(t, metachainNode, address).String()) + + activationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if activationEpoch <= metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + testBLSKeyIsInAuction(t, metachainNode, decodedBLSKey, blsKey, expectedTopUp, actionListSize, statistics, 1, address) + return + } + + // in staking ph 2/3.5 we do not find the bls key on the validator statistics + _, found := statistics[blsKey] + require.False(t, found) + require.Equal(t, staking.QueuedStatus, staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey)) +} + +func testBLSKeyIsInAuction( + t *testing.T, + metachainNode 
chainSimulatorProcess.NodeHandler, + blsKeyBytes []byte, + blsKey string, + topUpInAuctionList *big.Int, + actionListSize int, + validatorStatistics map[string]*validator.ValidatorStatistics, + numNodes int, + owner []byte, +) { + require.Equal(t, staking.StakedStatus, staking.GetBLSKeyStatus(t, metachainNode, blsKeyBytes)) + + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + + currentEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() + + shuffledToAuctionValPubKeys, err := metachainNode.GetProcessComponents().NodesCoordinator().GetShuffledOutToAuctionValidatorsPublicKeys(currentEpoch) + require.Nil(t, err) + + stakingV4Step2Epoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step2Flag) + stakingV4Step3Epoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step3Flag) + if stakingV4Step2Epoch == currentEpoch || stakingV4Step3Epoch == currentEpoch { + // starting from phase 2, we have the shuffled out nodes from the previous epoch in the action list + // if there is no lowWaitingList condition + if len(shuffledToAuctionValPubKeys) > 0 { + // there are 2 nodes per shard shuffled out so 2 * 4 = 8 + actionListSize += 8 + } + } + + require.Equal(t, actionListSize, len(auctionList)) + ownerAsBech32, err := metachainNode.GetCoreComponents().AddressPubKeyConverter().Encode(owner) + require.Nil(t, err) + if actionListSize != 0 { + nodeWasFound := false + for _, item := range auctionList { + if item.Owner != ownerAsBech32 { + continue + } + + require.Equal(t, numNodes, len(auctionList[0].Nodes)) + for _, node := range item.Nodes { + if node.BlsKey == blsKey { + require.Equal(t, topUpInAuctionList.String(), item.TopUpPerNode) + nodeWasFound = true + } + } + } + require.True(t, nodeWasFound) + } + + // in staking ph 4 we should find the key in the validators statics + validatorInfo, found := validatorStatistics[blsKey] + require.True(t, found) + require.Equal(t, staking.AuctionStatus, validatorInfo.ValidatorStatus) +} + +func testBLSKeysAreInQueueOrAuction(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte, blsKeys []string, totalTopUp *big.Int, actionListSize int) { + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + statistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + assert.Equal(t, totalTopUp, getBLSTopUpValue(t, metachainNode, address)) + + individualTopup := big.NewInt(0).Set(totalTopUp) + individualTopup.Div(individualTopup, big.NewInt(int64(len(blsKeys)))) + + for _, blsKey := range blsKeys { + decodedBLSKey, _ := hex.DecodeString(blsKey) + activationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if activationEpoch <= metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + testBLSKeyIsInAuction(t, metachainNode, decodedBLSKey, blsKey, individualTopup, actionListSize, statistics, len(blsKeys), address) + continue + } + + // in staking ph 2/3.5 we do not find the bls key on the validator statistics + _, found := statistics[blsKey] + require.False(t, found) + require.Equal(t, staking.QueuedStatus, staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey)) + } +} + +// Test description: +// Test 
that 2 different contracts with different topups that came from the normal stake will be considered in auction list computing in the correct order +// 1. Add 2 new validator private keys in the multi key handler +// 2. Set the initial state for 2 owners (mint 2 new wallets) +// 3. Do 2 stake transactions and test that the new keys are on queue / auction list and have the correct topup - 100 and 200 EGLD, respectively +// 4. Convert both validators into staking providers and test that the new keys are on queue / auction list and have the correct topup +// 5. If the staking v4 is activated (regardless the steps), check that the auction list sorted the 2 BLS keys based on topup + +// Internal test scenario #11 +func TestChainSimulator_MakeNewContractFromValidatorDataWith2StakingContracts(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 1) + }) + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 2) + }) + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + 
NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 3) + }) + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 4) + }) +} + +func testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Step 1. Add 2 new validator private keys in the multi key handler") + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + log.Info("Step 2. Set the initial state for 2 owners") + mintValue := big.NewInt(3010) + mintValue = mintValue.Mul(chainSimulatorIntegrationTests.OneEGLD, mintValue) + + validatorOwnerA, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + validatorOwnerB, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + log.Info("working with the following addresses", + "validatorOwnerA", validatorOwnerA.Bech32, "validatorOwnerB", validatorOwnerB.Bech32) + + log.Info("Step 3. 
Do 2 stake transactions and test that the new keys are on queue / auction list and have the correct topup") + + topupA := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(100)) + stakeValueA := big.NewInt(0).Add(chainSimulatorIntegrationTests.MinimumStakeValue, topupA) + txStakeA := generateStakeTransaction(t, cs, validatorOwnerA, blsKeys[0], stakeValueA) + + topupB := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(200)) + stakeValueB := big.NewInt(0).Add(chainSimulatorIntegrationTests.MinimumStakeValue, topupB) + txStakeB := generateStakeTransaction(t, cs, validatorOwnerB, blsKeys[1], stakeValueB) + + stakeTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStakeA, txStakeB}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 2, len(stakeTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorOwnerA.Bytes, blsKeys[0], topupA, 2) + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorOwnerB.Bytes, blsKeys[1], topupB, 2) + + log.Info("Step 4. Convert both validators into staking providers and test that the new keys are on queue / auction list and have the correct topup") + + txConvertA := generateConvertToStakingProviderTransaction(t, cs, validatorOwnerA) + txConvertB := generateConvertToStakingProviderTransaction(t, cs, validatorOwnerB) + + convertTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txConvertA, txConvertB}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 2, len(convertTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + delegationAddressA := convertTxs[0].Logs.Events[0].Topics[1] + delegationAddressB := convertTxs[1].Logs.Events[0].Topics[1] + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddressA, blsKeys[0], topupA, 2) + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddressB, blsKeys[1], topupB, 2) + + log.Info("Step 5. If the staking v4 is activated, check that the auction list sorted the 2 BLS keys based on topup") + step1ActivationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if step1ActivationEpoch > metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + // we are in staking v3.5, the test ends here + return + } + + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + + firstAuctionPosition := auctionList[0] + secondAuctionPosition := auctionList[1] + // check the correct order of the nodes in the auction list based on topup + require.Equal(t, blsKeys[1], firstAuctionPosition.Nodes[0].BlsKey) + require.Equal(t, topupB.String(), firstAuctionPosition.TopUpPerNode) + + require.Equal(t, blsKeys[0], secondAuctionPosition.Nodes[0].BlsKey) + require.Equal(t, topupA.String(), secondAuctionPosition.TopUpPerNode) +} + +// Test description: +// Test that 1 contract having 3 BLS keys proper handles the stakeNodes-unstakeNodes-unBondNodes sequence for 2 of the BLS keys +// 1. Add 3 new validator private keys in the multi key handler +// 2. Set the initial state for 1 owner and 1 delegator +// 3. 
Do a stake transaction and test that the new key is on queue / auction list and has the correct topup +// 4. Convert the validator into a staking providers and test that the key is on queue / auction list and has the correct topup +// 5. Add 2 nodes in the staking contract +// 6. Delegate 5000 EGLD to the contract +// 7. Stake the 2 nodes +// 8. UnStake 2 nodes (latest staked) +// 9. Unbond the 2 nodes (that were un staked) + +// Internal test scenario #85 +func TestChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 80, + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + // unbond succeeded because the nodes were on queue + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 1, staking.NotStakedStatus) + }) + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 2, staking.UnStakedStatus) + }) + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + 
RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 3, staking.UnStakedStatus) + }) + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 4, staking.UnStakedStatus) + }) +} + +func testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond( + t *testing.T, + cs chainSimulatorIntegrationTests.ChainSimulator, + targetEpoch int32, + nodesStatusAfterUnBondTx string, +) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Step 1. Add 3 new validator private keys in the multi key handler") + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(3) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + log.Info("Step 2. Set the initial state for 1 owner and 1 delegator") + mintValue := big.NewInt(10001) + mintValue = mintValue.Mul(chainSimulatorIntegrationTests.OneEGLD, mintValue) + + owner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + delegator, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + log.Info("working with the following addresses", + "owner", owner.Bech32, "", delegator.Bech32) + + log.Info("Step 3. 
Do a stake transaction and test that the new key is on queue / auction list and has the correct topup") + + topup := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(99)) + stakeValue := big.NewInt(0).Add(chainSimulatorIntegrationTests.MinimumStakeValue, topup) + txStake := generateStakeTransaction(t, cs, owner, blsKeys[0], stakeValue) + + stakeTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStake}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(stakeTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, owner.Bytes, blsKeys[0], topup, 1) + + log.Info("Step 4. Convert the validator into a staking providers and test that the key is on queue / auction list and has the correct topup") + + txConvert := generateConvertToStakingProviderTransaction(t, cs, owner) + + convertTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txConvert}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(convertTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + delegationAddress := convertTxs[0].Logs.Events[0].Topics[1] + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], topup, 1) + + log.Info("Step 5. Add 2 nodes in the staking contract") + txDataFieldAddNodes := fmt.Sprintf("addNodes@%s@%s@%s@%s", blsKeys[1], staking.MockBLSSignature+"02", blsKeys[2], staking.MockBLSSignature+"03") + ownerNonce := staking.GetNonce(t, cs, owner) + txAddNodes := chainSimulatorIntegrationTests.GenerateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldAddNodes, staking.GasLimitForStakeOperation) + + addNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txAddNodes}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(addNodesTxs)) + + log.Info("Step 6. Delegate 5000 EGLD to the contract") + delegateValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(5000)) + txDataFieldDelegate := "delegate" + delegatorNonce := staking.GetNonce(t, cs, delegator) + txDelegate := chainSimulatorIntegrationTests.GenerateTransaction(delegator.Bytes, delegatorNonce, delegationAddress, delegateValue, txDataFieldDelegate, staking.GasLimitForStakeOperation) + + delegateTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txDelegate}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(delegateTxs)) + + log.Info("Step 7. 
Stake the 2 nodes") + txDataFieldStakeNodes := fmt.Sprintf("stakeNodes@%s@%s", blsKeys[1], blsKeys[2]) + ownerNonce = staking.GetNonce(t, cs, owner) + txStakeNodes := chainSimulatorIntegrationTests.GenerateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldStakeNodes, staking.GasLimitForStakeOperation) + + stakeNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStakeNodes}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(stakeNodesTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the nodes + assert.Nil(t, err) + + // all 3 nodes should be staked (auction list size is 1 as there is one delegation SC with 3 BLS keys in the auction list) + testBLSKeysAreInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys, topup, 1) + + log.Info("Step 8. UnStake 2 nodes (latest staked)") + + txDataFieldUnStakeNodes := fmt.Sprintf("unStakeNodes@%s@%s", blsKeys[1], blsKeys[2]) + ownerNonce = staking.GetNonce(t, cs, owner) + txUnStakeNodes := chainSimulatorIntegrationTests.GenerateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldUnStakeNodes, staking.GasLimitForStakeOperation) + + unStakeNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txUnStakeNodes}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(unStakeNodesTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the unStaking of the nodes + assert.Nil(t, err) + + // check that only one node remains staked (auction list size is 1 as there is one delegation SC with 1 BLS key in the auction list) + expectedTopUp := big.NewInt(0) + expectedTopUp.Add(topup, delegateValue) // 99 + 5000 = 5099 + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], expectedTopUp, 1) + + log.Info("Step 9. 
Unbond the 2 nodes (that were un staked)") + + txDataFieldUnBondNodes := fmt.Sprintf("unBondNodes@%s@%s", blsKeys[1], blsKeys[2]) + ownerNonce = staking.GetNonce(t, cs, owner) + txUnBondNodes := chainSimulatorIntegrationTests.GenerateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldUnBondNodes, staking.GasLimitForStakeOperation) + + unBondNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txUnBondNodes}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(unBondNodesTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the nodes + assert.Nil(t, err) + + keyStatus := staking.GetAllNodeStates(t, metachainNode, delegationAddress) + require.Equal(t, len(blsKeys), len(keyStatus)) + // key[0] should be staked + require.Equal(t, staking.StakedStatus, keyStatus[blsKeys[0]]) + // key[1] and key[2] should be unstaked (unbond was not executed) + require.Equal(t, nodesStatusAfterUnBondTx, keyStatus[blsKeys[1]]) + require.Equal(t, nodesStatusAfterUnBondTx, keyStatus[blsKeys[2]]) +} + +func generateStakeTransaction( + t *testing.T, + cs chainSimulatorIntegrationTests.ChainSimulator, + owner dtos.WalletAddress, + blsKeyHex string, + stakeValue *big.Int, +) *transaction.Transaction { + account, err := cs.GetAccount(owner) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeyHex, staking.MockBLSSignature) + return chainSimulatorIntegrationTests.GenerateTransaction(owner.Bytes, account.Nonce, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) +} + +func generateConvertToStakingProviderTransaction( + t *testing.T, + cs chainSimulatorIntegrationTests.ChainSimulator, + owner dtos.WalletAddress, +) *transaction.Transaction { + account, err := cs.GetAccount(owner) + require.Nil(t, err) + + txDataField := fmt.Sprintf("makeNewContractFromValidatorData@%s@%s", maxCap, hexServiceFee) + return chainSimulatorIntegrationTests.GenerateTransaction(owner.Bytes, account.Nonce, vm.DelegationManagerSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, gasLimitForConvertOperation) +} + +// Test description +// Test the creation of a new delegation contract, adding nodes to it, delegating, and undelegating. + +// Test scenario +// 1. Initialize the chain simulator +// 2. Generate blocks to activate staking phases +// 3. Create a new delegation contract +// 4. Add validator nodes to the delegation contract +// 5. Perform delegation operations +// 6. Perform undelegation operations +// 7. Validate the results at each step +func TestChainSimulator_CreateNewDelegationContract(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test scenario done in staking 3.5 phase (staking v4 is not active) + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is staked + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. 
Check the node is unstaked in the next epoch + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 1) + }) + + // Test scenario done in staking v4 phase step 1 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is in action list + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. Check the node is unstaked in the next epoch + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 2) + }) + + // Test scenario done in staking v4 phase step 2 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is in action list + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. 
Check the node is unstaked in the next epoch + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 3) + }) + + // Test scenario done in staking v4 phase step 3 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is in action list + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. Check the node is unstaked in the next epoch + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 4) + }) + +} + +func testChainSimulatorCreateNewDelegationContract(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + initialFunds := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(10000)) // 10000 EGLD for each + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + delegator1, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + delegator2, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + maxDelegationCap := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(51000)) // 51000 EGLD cap + txCreateDelegationContract := 
chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 0, vm.DelegationManagerSCAddress, staking.InitialDelegationValue, + fmt.Sprintf("createNewDelegationContract@%s@%s", hex.EncodeToString(maxDelegationCap.Bytes()), hexServiceFee), + gasLimitForDelegationContractCreationOperation) + createDelegationContractTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txCreateDelegationContract, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, createDelegationContractTx) + + // Check delegation contract creation was successful + data := createDelegationContractTx.SmartContractResults[0].Data + parts := strings.Split(data, "@") + require.Equal(t, 3, len(parts)) + + require.Equal(t, hex.EncodeToString([]byte("ok")), parts[1]) + delegationContractAddressHex, _ := hex.DecodeString(parts[2]) + delegationContractAddress, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(delegationContractAddressHex) + + output, err := executeQuery(cs, core.MetachainShardId, vm.DelegationManagerSCAddress, "getAllContractAddresses", nil) + require.Nil(t, err) + returnAddress, err := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(output.ReturnData[0]) + require.Nil(t, err) + require.Equal(t, delegationContractAddress, returnAddress) + delegationContractAddressBytes := output.ReturnData[0] + + // Step 2: Add validator nodes to the delegation contract + // This step requires generating BLS keys for validators, signing messages, and sending the "addNodes" transaction. + // Add checks to verify nodes are added successfully. + validatorSecretKeysBytes, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(validatorSecretKeysBytes) + require.Nil(t, err) + + signatures := getSignatures(delegationContractAddressBytes, validatorSecretKeysBytes) + txAddNodes := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 1, delegationContractAddressBytes, chainSimulatorIntegrationTests.ZeroValue, addNodesTxData(blsKeys, signatures), gasLimitForAddNodesOperation) + addNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txAddNodes, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, addNodesTx) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys := getNodesFromContract(output.ReturnData) + require.Equal(t, 0, len(stakedKeys)) + require.Equal(t, 1, len(notStakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(notStakedKeys[0])) + require.Equal(t, 0, len(unStakedKeys)) + + expectedTopUp := big.NewInt(0).Set(staking.InitialDelegationValue) + expectedTotalStaked := big.NewInt(0).Set(staking.InitialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{validatorOwner.Bytes}) + require.Nil(t, err) + require.Equal(t, staking.InitialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + // Step 3: Perform delegation operations + txDelegate1 := 
chainSimulatorIntegrationTests.GenerateTransaction(delegator1.Bytes, 0, delegationContractAddressBytes, staking.InitialDelegationValue, "delegate", gasLimitForDelegate) + delegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate1, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate1Tx) + + expectedTopUp = expectedTopUp.Add(expectedTopUp, staking.InitialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Add(expectedTotalStaked, staking.InitialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator1.Bytes}) + require.Nil(t, err) + require.Equal(t, staking.InitialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + txDelegate2 := chainSimulatorIntegrationTests.GenerateTransaction(delegator2.Bytes, 0, delegationContractAddressBytes, staking.InitialDelegationValue, "delegate", gasLimitForDelegate) + delegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate2, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate2Tx) + + expectedTopUp = expectedTopUp.Add(expectedTopUp, staking.InitialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Add(expectedTotalStaked, staking.InitialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator2.Bytes}) + require.Nil(t, err) + require.Equal(t, staking.InitialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + // Step 4: Perform stakeNodes + + txStakeNodes := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 2, delegationContractAddressBytes, chainSimulatorIntegrationTests.ZeroValue, fmt.Sprintf("stakeNodes@%s", blsKeys[0]), staking.GasLimitForStakeOperation) + stakeNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStakeNodes, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeNodesTx) + + expectedTopUp = expectedTopUp.Sub(expectedTopUp, staking.InitialDelegationValue) + expectedTopUp = expectedTopUp.Sub(expectedTopUp, staking.InitialDelegationValue) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + // Make block finalized + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + 
require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationContractAddressBytes, blsKeys[0], expectedTopUp, 1) + + // Step 5: Perform unDelegate from 1 user + // The nodes should remain in the staked state + // The total active stake should be reduced by the amount undelegated + + txUndelegate1 := chainSimulatorIntegrationTests.GenerateTransaction(delegator1.Bytes, 1, delegationContractAddressBytes, chainSimulatorIntegrationTests.ZeroValue, fmt.Sprintf("unDelegate@%s", hex.EncodeToString(staking.InitialDelegationValue.Bytes())), gasLimitForUndelegateOperation) + undelegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUndelegate1, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, undelegate1Tx) + + expectedTopUp = expectedTopUp.Sub(expectedTopUp, staking.InitialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Sub(expectedTotalStaked, staking.InitialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp.String(), getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes).String()) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator1.Bytes}) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.ZeroValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + // Step 6: Perform unDelegate from last user + // The nodes should change to unStaked state + // The total active stake should be reduced by the amount undelegated + + txUndelegate2 := chainSimulatorIntegrationTests.GenerateTransaction(delegator2.Bytes, 1, delegationContractAddressBytes, chainSimulatorIntegrationTests.ZeroValue, fmt.Sprintf("unDelegate@%s", hex.EncodeToString(staking.InitialDelegationValue.Bytes())), gasLimitForUndelegateOperation) + undelegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUndelegate2, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, undelegate2Tx) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, "1250000000000000000000", big.NewInt(0).SetBytes(output.ReturnData[0]).String()) + require.Equal(t, chainSimulatorIntegrationTests.ZeroValue, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator2.Bytes}) + require.Nil(t, err) + require.Equal(t, "0", big.NewInt(0).SetBytes(output.ReturnData[0]).String()) + + // still staked until epoch change + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, 
blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 0, len(stakedKeys)) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 1, len(unStakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(unStakedKeys[0])) +} + +func TestChainSimulator_MaxDelegationCap(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test scenario done in staking 3.5 phase (staking v4 is not active) + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 3 delegators + // 3. Create a new delegation contract with 1250 egld and maximum delegation cap of 3000 EGLD + // 4. Add node to the delegation contract + // 5. Delegate from user A 1250 EGLD, check the topup is 2500 + // 6. Delegate from user B 501 EGLD, check it fails + // 7. Stake node, check the topup is 0, check the node is staked + // 8. Delegate from user B 501 EGLD, check it fails + // 9. Delegate from user B 500 EGLD, check the topup is 500 + // 10. Delegate from user B 20 EGLD, check it fails + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMaxDelegationCap(t, cs, 1) + }) + + // Test scenario done in staking v4 phase step 1 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 3 delegators + // 3. Create a new delegation contract with 1250 egld and maximum delegation cap of 3000 EGLD + // 4. Add node to the delegation contract + // 5. Delegate from user A 1250 EGLD, check the topup is 2500 + // 6. Delegate from user B 501 EGLD, check it fails + // 7. Stake node, check the topup is 0, check the node is staked, check the node is in action list + // 8. Delegate from user B 501 EGLD, check it fails + // 9. Delegate from user B 500 EGLD, check the topup is 500 + // 10. 
Delegate from user B 20 EGLD, check it fails + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMaxDelegationCap(t, cs, 2) + }) + + // Test scenario done in staking v4 phase step 2 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 3 delegators + // 3. Create a new delegation contract with 1250 egld and maximum delegation cap of 3000 EGLD + // 4. Add node to the delegation contract + // 5. Delegate from user A 1250 EGLD, check the topup is 2500 + // 6. Delegate from user B 501 EGLD, check it fails + // 7. Stake node, check the topup is 0, check the node is staked, check the node is in action list + // 8. Delegate from user B 501 EGLD, check it fails + // 9. Delegate from user B 500 EGLD, check the topup is 500 + // 10. Delegate from user B 20 EGLD, check it fails + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMaxDelegationCap(t, cs, 3) + }) + + // Test scenario done in staking v4 phase step 3 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 3 delegators + // 3. Create a new delegation contract with 1250 egld + // 4. Add node to the delegation contract + // 5. Delegate from user A 1250 EGLD, check the topup is 2500 + // 6. Delegate from user B 501 EGLD, check it fails + // 7. Stake node, check the topup is 0, check the node is staked, check the node is in action list + // 8. Delegate from user B 501 EGLD, check it fails + // 9. Delegate from user B 500 EGLD, check the topup is 500 + // 10. 
Delegate from user B 20 EGLD, check it fails + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMaxDelegationCap(t, cs, 4) + }) + +} + +func testChainSimulatorMaxDelegationCap(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + initialFunds := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(10000)) // 10000 EGLD for each + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + delegatorA, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + delegatorB, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + delegatorC, err := cs.GenerateAndMintWalletAddress(core.AllShardId, initialFunds) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + // Step 3: Create a new delegation contract + + maxDelegationCap := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(3000)) // 3000 EGLD cap + txCreateDelegationContract := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 0, vm.DelegationManagerSCAddress, staking.InitialDelegationValue, + fmt.Sprintf("createNewDelegationContract@%s@%s", hex.EncodeToString(maxDelegationCap.Bytes()), hexServiceFee), + gasLimitForDelegationContractCreationOperation) + createDelegationContractTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txCreateDelegationContract, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, createDelegationContractTx) + + output, err := executeQuery(cs, core.MetachainShardId, vm.DelegationManagerSCAddress, "getAllContractAddresses", nil) + require.Nil(t, err) + delegationContractAddress := output.ReturnData[0] + + // Step 2: Add validator nodes to the delegation contract + // This step requires generating BLS keys for validators, signing messages, and sending the "addNodes" transaction. + // Add checks to verify nodes are added successfully. 
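+ // Note (descriptive only): the "addNodes" payload below is assembled by the helpers defined at the
+ // end of this file. getSignatures BLS-signs the delegation contract address with each node's secret
+ // key, and addNodesTxData joins the pairs as "addNodes@<blsKeyHex>@<signatureHex>", repeated per key.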
+ validatorSecretKeysBytes, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(validatorSecretKeysBytes) + require.Nil(t, err) + + signatures := getSignatures(delegationContractAddress, validatorSecretKeysBytes) + txAddNodes := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 1, delegationContractAddress, chainSimulatorIntegrationTests.ZeroValue, addNodesTxData(blsKeys, signatures), gasLimitForAddNodesOperation) + addNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txAddNodes, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, addNodesTx) + + expectedTopUp := big.NewInt(0).Set(staking.InitialDelegationValue) + expectedTotalStaked := big.NewInt(0).Set(staking.InitialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddress)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getUserActiveStake", [][]byte{validatorOwner.Bytes}) + require.Nil(t, err) + require.Equal(t, staking.InitialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + // Step 3: Perform delegation operations + tx1delegatorA := chainSimulatorIntegrationTests.GenerateTransaction(delegatorA.Bytes, 0, delegationContractAddress, staking.InitialDelegationValue, "delegate", gasLimitForDelegate) + delegatorATx1, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx1delegatorA, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegatorATx1) + + expectedTopUp = expectedTopUp.Add(expectedTopUp, staking.InitialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Add(expectedTotalStaked, staking.InitialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddress)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getUserActiveStake", [][]byte{delegatorA.Bytes}) + require.Nil(t, err) + require.Equal(t, staking.InitialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + delegateValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(501)) // 501 EGLD + tx1delegatorB := chainSimulatorIntegrationTests.GenerateTransaction(delegatorB.Bytes, 0, delegationContractAddress, delegateValue, "delegate", gasLimitForDelegate) + delegatorBTx1, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx1delegatorB, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegatorBTx1) + require.Equal(t, delegatorBTx1.SmartContractResults[0].ReturnMessage, "total delegation cap reached") + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddress)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getUserActiveStake", 
[][]byte{delegatorB.Bytes}) + require.Nil(t, err) + require.Zero(t, len(output.ReturnData)) + require.Equal(t, "view function works only for existing delegators", output.ReturnMessage) + + // Step 4: Perform stakeNodes + + txStakeNodes := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, 2, delegationContractAddress, chainSimulatorIntegrationTests.ZeroValue, fmt.Sprintf("stakeNodes@%s", blsKeys[0]), gasLimitForDelegate) + stakeNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStakeNodes, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeNodesTx) + + require.Equal(t, chainSimulatorIntegrationTests.ZeroValue.String(), getBLSTopUpValue(t, metachainNode, delegationContractAddress).String()) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys := getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationContractAddress, blsKeys[0], chainSimulatorIntegrationTests.ZeroValue, 1) + + tx2delegatorB := chainSimulatorIntegrationTests.GenerateTransaction(delegatorB.Bytes, 1, delegationContractAddress, delegateValue, "delegate", gasLimitForDelegate) + delegatorBTx2, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx2delegatorB, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegatorBTx2) + require.Equal(t, delegatorBTx2.SmartContractResults[0].ReturnMessage, "total delegation cap reached") + + // check the tx failed + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, chainSimulatorIntegrationTests.ZeroValue.String(), getBLSTopUpValue(t, metachainNode, delegationContractAddress).String()) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getUserActiveStake", [][]byte{delegatorB.Bytes}) + require.Nil(t, err) + require.Zero(t, len(output.ReturnData)) + require.Equal(t, "view function works only for existing delegators", output.ReturnMessage) + + delegateValue = big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(500)) // 500 EGLD + tx3delegatorB := chainSimulatorIntegrationTests.GenerateTransaction(delegatorB.Bytes, 2, delegationContractAddress, delegateValue, "delegate", gasLimitForDelegate) + delegatorBTx3, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx3delegatorB, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegatorBTx3) + + expectedTopUp = big.NewInt(0).Set(delegateValue) + expectedTotalStaked = expectedTotalStaked.Add(expectedTotalStaked, delegateValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddress)) + + output, err = executeQuery(cs, 
core.MetachainShardId, delegationContractAddress, "getUserActiveStake", [][]byte{delegatorB.Bytes}) + require.Nil(t, err) + require.Equal(t, delegateValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + delegateValue = big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(20)) // 20 EGLD + tx1DelegatorC := chainSimulatorIntegrationTests.GenerateTransaction(delegatorC.Bytes, 0, delegationContractAddress, delegateValue, "delegate", gasLimitForDelegate) + delegatorCTx1, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx1DelegatorC, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegatorCTx1) + require.Equal(t, delegatorBTx2.SmartContractResults[0].ReturnMessage, "total delegation cap reached") + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddress)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddress, "getUserActiveStake", [][]byte{delegatorC.Bytes}) + require.Nil(t, err) + require.Zero(t, len(output.ReturnData)) + require.Equal(t, "view function works only for existing delegators", output.ReturnMessage) +} + +func executeQuery(cs chainSimulatorIntegrationTests.ChainSimulator, shardID uint32, scAddress []byte, funcName string, args [][]byte) (*dataVm.VMOutputApi, error) { + output, _, err := cs.GetNodeHandler(shardID).GetFacadeHandler().ExecuteSCQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: funcName, + Arguments: args, + }) + return output, err +} + +func addNodesTxData(blsKeys []string, sigs [][]byte) string { + txData := "addNodes" + + for i := range blsKeys { + txData = txData + "@" + blsKeys[i] + "@" + hex.EncodeToString(sigs[i]) + } + + return txData +} + +func getSignatures(msg []byte, blsKeys [][]byte) [][]byte { + signer := mclsig.NewBlsSigner() + + signatures := make([][]byte, len(blsKeys)) + for i, blsKey := range blsKeys { + sk, _ := signing.NewKeyGenerator(mcl.NewSuiteBLS12()).PrivateKeyFromByteArray(blsKey) + signatures[i], _ = signer.Sign(sk, msg) + } + + return signatures +} + +func getNodesFromContract(returnData [][]byte) ([][]byte, [][]byte, [][]byte) { + var stakedKeys, notStakedKeys, unStakedKeys [][]byte + + for i := 0; i < len(returnData); i += 2 { + switch string(returnData[i]) { + case "staked": + stakedKeys = append(stakedKeys, returnData[i+1]) + case "notStaked": + notStakedKeys = append(notStakedKeys, returnData[i+1]) + case "unStaked": + unStakedKeys = append(unStakedKeys, returnData[i+1]) + } + } + return stakedKeys, notStakedKeys, unStakedKeys +} + +func getBLSTopUpValue(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte) *big.Int { + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStakedTopUpStakedBlsKeys", + CallerAddr: vm.StakingSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{address}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) + + if len(result.ReturnData[0]) == 0 { + return big.NewInt(0) + } + + return big.NewInt(0).SetBytes(result.ReturnData[0]) +} + +// Test description: +// Test that merging delegation with whiteListForMerge and mergeValidatorToDelegationWithWhitelist 
contracts still works properly +// Test that their topups will merge too and will be used by auction list computing. +// +// Internal test scenario #12 +func TestChainSimulator_MergeDelegation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test steps: + // 1. User A - Stake 1 node to have 100 egld more than minimum required stake value + // 2. User A - Execute `makeNewContractFromValidatorData` to create delegation contract based on User A account + // 3. User B - Stake 1 node with more than 2500 egld + // 4. User A - Execute `whiteListForMerge@addressA` in order to whitelist for merge User B + // 5. User B - Execute `mergeValidatorToDelegationWithWhitelist@delegationContract` in order to merge User B to delegation contract created at step 2. + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + 
NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 4) + }) +} + +func testChainSimulatorMergingDelegation(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(3) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(3000) + mintValue = mintValue.Mul(chainSimulatorIntegrationTests.OneEGLD, mintValue) + + validatorA, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + validatorB, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + log.Info("Step 1. 
User A: - stake 1 node to have 100 egld more than minimum stake value") + stakeValue := big.NewInt(0).Set(chainSimulatorIntegrationTests.MinimumStakeValue) + addedStakedValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(100)) + stakeValue.Add(stakeValue, addedStakedValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], staking.MockBLSSignature) + txStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorA.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorA.Bytes, blsKeys[0], addedStakedValue, 1) + require.Equal(t, addedStakedValue, getBLSTopUpValue(t, metachainNode, validatorA.Bytes)) + + log.Info("Step 2. Execute MakeNewContractFromValidatorData for User A") + txDataField = fmt.Sprintf("makeNewContractFromValidatorData@%s@%s", maxCap, hexServiceFee) + txConvert := chainSimulatorIntegrationTests.GenerateTransaction(validatorA.Bytes, 1, vm.DelegationManagerSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, gasLimitForConvertOperation) + convertTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txConvert, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, convertTx) + + delegationAddress := convertTx.Logs.Events[0].Topics[1] + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], addedStakedValue, 1) + + log.Info("Step 3. User B: - stake 1 node to have 100 egld more") + stakeValue = big.NewInt(0).Set(chainSimulatorIntegrationTests.MinimumStakeValue) + addedStakedValue = big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(100)) + stakeValue.Add(stakeValue, addedStakedValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], staking.MockBLSSignature) + txStake = chainSimulatorIntegrationTests.GenerateTransaction(validatorB.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, staking.GasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorB.Bytes, blsKeys[1], addedStakedValue, 2) + require.Equal(t, addedStakedValue, getBLSTopUpValue(t, metachainNode, validatorB.Bytes)) + + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + require.Equal(t, delegationAddress, getBLSKeyOwner(t, metachainNode, decodedBLSKey0)) + + decodedBLSKey1, _ := hex.DecodeString(blsKeys[1]) + require.Equal(t, validatorB.Bytes, getBLSKeyOwner(t, metachainNode, decodedBLSKey1)) + + log.Info("Step 4. 
User A : whitelistForMerge@addressB") + txDataField = fmt.Sprintf("whitelistForMerge@%s", hex.EncodeToString(validatorB.Bytes)) + whitelistForMerge := chainSimulatorIntegrationTests.GenerateTransaction(validatorA.Bytes, 2, delegationAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, gasLimitForDelegate) + whitelistForMergeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(whitelistForMerge, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, whitelistForMergeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the whitelisting for merge + require.Nil(t, err) + + log.Info("Step 5. User B : mergeValidatorToDelegationWithWhitelist") + txDataField = fmt.Sprintf("mergeValidatorToDelegationWithWhitelist@%s", hex.EncodeToString(delegationAddress)) + + txConvert = chainSimulatorIntegrationTests.GenerateTransaction(validatorB.Bytes, 1, vm.DelegationManagerSCAddress, chainSimulatorIntegrationTests.ZeroValue, txDataField, gasLimitForMergeOperation) + convertTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txConvert, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, convertTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the merge operation + require.Nil(t, err) + + decodedBLSKey0, _ = hex.DecodeString(blsKeys[0]) + require.Equal(t, delegationAddress, getBLSKeyOwner(t, metachainNode, decodedBLSKey0)) + + decodedBLSKey1, _ = hex.DecodeString(blsKeys[1]) + require.Equal(t, delegationAddress, getBLSKeyOwner(t, metachainNode, decodedBLSKey1)) + + expectedTopUpValue := big.NewInt(0).Mul(chainSimulatorIntegrationTests.OneEGLD, big.NewInt(200)) + require.Equal(t, expectedTopUpValue, getBLSTopUpValue(t, metachainNode, delegationAddress)) +} + +func getBLSKeyOwner(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) []byte { + scQuery := &process.SCQuery{ + ScAddress: vm.StakingSCAddress, + FuncName: "getOwner", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, chainSimulatorIntegrationTests.OkReturnCode, result.ReturnCode) + + return result.ReturnData[0] +} diff --git a/integrationTests/chainSimulator/staking/stakingProvider/stakingProviderWithNodesinQueue_test.go b/integrationTests/chainSimulator/staking/stakingProvider/stakingProviderWithNodesinQueue_test.go new file mode 100644 index 00000000000..dd89ecf2c28 --- /dev/null +++ b/integrationTests/chainSimulator/staking/stakingProvider/stakingProviderWithNodesinQueue_test.go @@ -0,0 +1,149 @@ +package stakingProvider + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-go/config" + chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator/staking" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/vm" + + "github.com/multiversx/mx-chain-core-go/core" + 
"github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/stretchr/testify/require" +) + +const ( + defaultPathToInitialConfig = "../../../../cmd/node/config/" +) + +func TestStakingProviderWithNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + stakingV4ActivationEpoch := uint32(2) + + t.Run("staking ph 4 step 1 active", func(t *testing.T) { + testStakingProviderWithNodesReStakeUnStaked(t, stakingV4ActivationEpoch) + }) + + t.Run("staking ph 4 step 2 active", func(t *testing.T) { + testStakingProviderWithNodesReStakeUnStaked(t, stakingV4ActivationEpoch+1) + }) + + t.Run("staking ph 4 step 3 active", func(t *testing.T) { + testStakingProviderWithNodesReStakeUnStaked(t, stakingV4ActivationEpoch+2) + }) +} + +func testStakingProviderWithNodesReStakeUnStaked(t *testing.T, stakingV4ActivationEpoch uint32) { + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, stakingV4ActivationEpoch) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + mintValue := big.NewInt(0).Mul(big.NewInt(5000), chainSimulatorIntegrationTests.OneEGLD) + validatorOwner, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + err = cs.GenerateBlocksUntilEpochIsReached(1) + require.Nil(t, err) + + // create delegation contract + stakeValue, _ := big.NewInt(0).SetString("4250000000000000000000", 10) + dataField := "createNewDelegationContract@00@0ea1" + txStake := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, staking.GetNonce(t, cs, validatorOwner), vm.DelegationManagerSCAddress, stakeValue, dataField, 80_000_000) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + delegationAddress := stakeTx.Logs.Events[2].Address + delegationAddressBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(delegationAddress) + + // add nodes in queue + _, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + txDataFieldAddNodes := fmt.Sprintf("addNodes@%s@%s", blsKeys[0], staking.MockBLSSignature+"02") + ownerNonce := staking.GetNonce(t, cs, validatorOwner) + txAddNodes := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, ownerNonce, delegationAddressBytes, big.NewInt(0), txDataFieldAddNodes, staking.GasLimitForStakeOperation) + addNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txAddNodes, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, addNodesTx) + + txDataFieldStakeNodes := fmt.Sprintf("stakeNodes@%s", blsKeys[0]) + ownerNonce = staking.GetNonce(t, cs, validatorOwner) + txStakeNodes := 
chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, ownerNonce, delegationAddressBytes, big.NewInt(0), txDataFieldStakeNodes, staking.GasLimitForStakeOperation) + + stakeNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStakeNodes}, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(stakeNodesTxs)) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + status := staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "queued", status) + + // activate staking v4 + err = cs.GenerateBlocksUntilEpochIsReached(int32(stakingV4ActivationEpoch)) + require.Nil(t, err) + + status = staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "unStaked", status) + + result := staking.GetAllNodeStates(t, metachainNode, delegationAddressBytes) + require.NotNil(t, result) + require.Equal(t, "unStaked", result[blsKeys[0]]) + + ownerNonce = staking.GetNonce(t, cs, validatorOwner) + reStakeTxData := fmt.Sprintf("reStakeUnStakedNodes@%s", blsKeys[0]) + reStakeNodes := chainSimulatorIntegrationTests.GenerateTransaction(validatorOwner.Bytes, ownerNonce, delegationAddressBytes, big.NewInt(0), reStakeTxData, staking.GasLimitForStakeOperation) + reStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(reStakeNodes, staking.MaxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, reStakeTx) + + status = staking.GetBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "staked", status) + + result = staking.GetAllNodeStates(t, metachainNode, delegationAddressBytes) + require.NotNil(t, result) + require.Equal(t, "staked", result[blsKeys[0]]) + + err = cs.GenerateBlocks(20) + require.Nil(t, err) + + staking.CheckValidatorStatus(t, cs, blsKeys[0], "auction") +} diff --git a/integrationTests/chainSimulator/testing.go b/integrationTests/chainSimulator/testing.go new file mode 100644 index 00000000000..212021a8fbd --- /dev/null +++ b/integrationTests/chainSimulator/testing.go @@ -0,0 +1,248 @@ +package chainSimulator + +import ( + "encoding/base64" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/node/chainSimulator/errors" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + + "github.com/multiversx/mx-chain-core-go/core" + coreAPI "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// CheckSetState - +func CheckSetState(t *testing.T, chainSimulator ChainSimulator, nodeHandler chainSimulatorProcess.NodeHandler) { + keyValueMap := map[string]string{ + "01": "01", + "02": "02", + } + + address := "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj" + err := chainSimulator.SetKeyValueForAddress(address, keyValueMap) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(1) + require.Nil(t, err) + + keyValuePairs, _, err := nodeHandler.GetFacadeHandler().GetKeyValuePairs(address, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, keyValueMap, keyValuePairs) +} + +// CheckSetEntireState - 
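+// sets the provided full account state through SetStateMultiple, generates blocks so the state is
+// committed, then verifies it: a getSum SC query must return the expected counter value and the
+// account fetched through the facade must expose the same balance, developer rewards, code,
+// code hash, code metadata, owner and root hash that were set.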
+func CheckSetEntireState(t *testing.T, chainSimulator ChainSimulator, nodeHandler chainSimulatorProcess.NodeHandler, accountState *dtos.AddressState) { + err := chainSimulator.SetStateMultiple([]*dtos.AddressState{accountState}) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(30) + require.Nil(t, err) + + scAddress, _ := nodeHandler.GetCoreComponents().AddressPubKeyConverter().Decode(accountState.Address) + res, _, err := nodeHandler.GetFacadeHandler().ExecuteSCQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: "getSum", + CallerAddr: nil, + BlockNonce: core.OptionalUint64{}, + }) + require.Nil(t, err) + + counterValue := big.NewInt(0).SetBytes(res.ReturnData[0]).Int64() + require.Equal(t, 10, int(counterValue)) + + time.Sleep(time.Second) + + account, _, err := nodeHandler.GetFacadeHandler().GetAccount(accountState.Address, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, accountState.Balance, account.Balance) + require.Equal(t, accountState.DeveloperRewards, account.DeveloperReward) + require.Equal(t, accountState.Code, account.Code) + require.Equal(t, accountState.CodeHash, base64.StdEncoding.EncodeToString(account.CodeHash)) + require.Equal(t, accountState.CodeMetadata, base64.StdEncoding.EncodeToString(account.CodeMetadata)) + require.Equal(t, accountState.Owner, account.OwnerAddress) + require.Equal(t, accountState.RootHash, base64.StdEncoding.EncodeToString(account.RootHash)) +} + +// CheckSetEntireStateWithRemoval - +func CheckSetEntireStateWithRemoval(t *testing.T, chainSimulator ChainSimulator, nodeHandler chainSimulatorProcess.NodeHandler, accountState *dtos.AddressState) { + // activate the auto balancing tries so the results will be the same + err := chainSimulator.GenerateBlocks(30) + require.Nil(t, err) + + err = chainSimulator.SetStateMultiple([]*dtos.AddressState{accountState}) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(2) + require.Nil(t, err) + + scAddress, _ := nodeHandler.GetCoreComponents().AddressPubKeyConverter().Decode(accountState.Address) + res, _, err := nodeHandler.GetFacadeHandler().ExecuteSCQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: "getSum", + CallerAddr: nil, + BlockNonce: core.OptionalUint64{}, + }) + require.Nil(t, err) + + counterValue := big.NewInt(0).SetBytes(res.ReturnData[0]).Int64() + require.Equal(t, 10, int(counterValue)) + + account, _, err := nodeHandler.GetFacadeHandler().GetAccount(accountState.Address, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, accountState.Balance, account.Balance) + require.Equal(t, accountState.DeveloperRewards, account.DeveloperReward) + require.Equal(t, accountState.Code, account.Code) + require.Equal(t, accountState.CodeHash, base64.StdEncoding.EncodeToString(account.CodeHash)) + require.Equal(t, accountState.CodeMetadata, base64.StdEncoding.EncodeToString(account.CodeMetadata)) + require.Equal(t, accountState.Owner, account.OwnerAddress) + require.Equal(t, accountState.RootHash, base64.StdEncoding.EncodeToString(account.RootHash)) + + // Now we remove the account + err = chainSimulator.RemoveAccounts([]string{accountState.Address}) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(2) + require.Nil(t, err) + + account, _, err = nodeHandler.GetFacadeHandler().GetAccount(accountState.Address, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, "0", account.Balance) + require.Equal(t, "0", account.DeveloperReward) + require.Equal(t, "", account.Code) + require.Equal(t, "", 
base64.StdEncoding.EncodeToString(account.CodeHash)) + require.Equal(t, "", base64.StdEncoding.EncodeToString(account.CodeMetadata)) + require.Equal(t, "", account.OwnerAddress) + require.Equal(t, "", base64.StdEncoding.EncodeToString(account.RootHash)) + + // Set the state again + err = chainSimulator.SetStateMultiple([]*dtos.AddressState{accountState}) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(2) + require.Nil(t, err) + + account, _, err = nodeHandler.GetFacadeHandler().GetAccount(accountState.Address, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + + require.Equal(t, accountState.Balance, account.Balance) + require.Equal(t, accountState.DeveloperRewards, account.DeveloperReward) + require.Equal(t, accountState.Code, account.Code) + require.Equal(t, accountState.CodeHash, base64.StdEncoding.EncodeToString(account.CodeHash)) + require.Equal(t, accountState.CodeMetadata, base64.StdEncoding.EncodeToString(account.CodeMetadata)) + require.Equal(t, accountState.Owner, account.OwnerAddress) + require.Equal(t, accountState.RootHash, base64.StdEncoding.EncodeToString(account.RootHash)) +} + +// CheckGetAccount - +func CheckGetAccount(t *testing.T, chainSimulator ChainSimulator) { + // the facade's GetAccount method requires that at least one block was produced over the genesis block + err := chainSimulator.GenerateBlocks(1) + require.Nil(t, err) + + address := dtos.WalletAddress{ + Bech32: "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj", + } + address.Bytes, err = chainSimulator.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(address.Bech32) + require.Nil(t, err) + + account, err := chainSimulator.GetAccount(address) + require.Nil(t, err) + require.Equal(t, uint64(0), account.Nonce) + require.Equal(t, "0", account.Balance) + + nonce := uint64(37) + err = chainSimulator.SetStateMultiple([]*dtos.AddressState{ + { + Address: address.Bech32, + Nonce: &nonce, + Balance: big.NewInt(38).String(), + }, + }) + require.Nil(t, err) + + // without this call the test will fail because the latest produced block points to a state roothash that tells that + // the account has the nonce 0 + _ = chainSimulator.GenerateBlocks(1) + + account, err = chainSimulator.GetAccount(address) + require.Nil(t, err) + require.Equal(t, uint64(37), account.Nonce) + require.Equal(t, "38", account.Balance) +} + +// CheckGenerateTransactions - +func CheckGenerateTransactions(t *testing.T, chainSimulator ChainSimulator) { + transferValue := big.NewInt(0).Mul(OneEGLD, big.NewInt(5)) + + wallet0, err := chainSimulator.GenerateAndMintWalletAddress(0, InitialAmount) + require.Nil(t, err) + + wallet1, err := chainSimulator.GenerateAndMintWalletAddress(1, InitialAmount) + require.Nil(t, err) + + wallet2, err := chainSimulator.GenerateAndMintWalletAddress(2, InitialAmount) + require.Nil(t, err) + + wallet3, err := chainSimulator.GenerateAndMintWalletAddress(2, InitialAmount) + require.Nil(t, err) + + wallet4, err := chainSimulator.GenerateAndMintWalletAddress(2, InitialAmount) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(1) + require.Nil(t, err) + + gasLimit := uint64(50000) + tx0 := GenerateTransaction(wallet0.Bytes, 0, wallet2.Bytes, transferValue, "", gasLimit) + tx1 := GenerateTransaction(wallet1.Bytes, 0, wallet2.Bytes, transferValue, "", gasLimit) + tx3 := GenerateTransaction(wallet3.Bytes, 0, wallet4.Bytes, transferValue, "", gasLimit) + + maxNumOfBlockToGenerateWhenExecutingTx := 15 + + t.Run("nil or empty slice of transactions should error", func(t 
*testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted(nil, 1) + assert.Equal(t, errors.ErrEmptySliceOfTxs, errSend) + assert.Nil(t, sentTxs) + + sentTxs, errSend = chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted(make([]*transaction.Transaction, 0), 1) + assert.Equal(t, errors.ErrEmptySliceOfTxs, errSend) + assert.Nil(t, sentTxs) + }) + t.Run("invalid max number of blocks to generate should error", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{tx0, tx1}, 0) + assert.Equal(t, errors.ErrInvalidMaxNumOfBlocks, errSend) + assert.Nil(t, sentTxs) + }) + t.Run("nil transaction in slice should error", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{nil}, 1) + assert.ErrorIs(t, errSend, errors.ErrNilTransaction) + assert.Nil(t, sentTxs) + }) + t.Run("2 transactions from different shard should call send correctly", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{tx0, tx1}, maxNumOfBlockToGenerateWhenExecutingTx) + assert.Equal(t, 2, len(sentTxs)) + assert.Nil(t, errSend) + + account, errGet := chainSimulator.GetAccount(wallet2) + assert.Nil(t, errGet) + expectedBalance := big.NewInt(0).Add(InitialAmount, transferValue) + expectedBalance.Add(expectedBalance, transferValue) + assert.Equal(t, expectedBalance.String(), account.Balance) + }) + t.Run("1 transaction should be sent correctly", func(t *testing.T) { + _, errSend := chainSimulator.SendTxAndGenerateBlockTilTxIsExecuted(tx3, maxNumOfBlockToGenerateWhenExecutingTx) + assert.Nil(t, errSend) + + account, errGet := chainSimulator.GetAccount(wallet4) + assert.Nil(t, errGet) + expectedBalance := big.NewInt(0).Add(InitialAmount, transferValue) + assert.Equal(t, expectedBalance.String(), account.Balance) + }) +} diff --git a/integrationTests/chainSimulator/vm/egldMultiTransfer_test.go b/integrationTests/chainSimulator/vm/egldMultiTransfer_test.go new file mode 100644 index 00000000000..10a10cee5ad --- /dev/null +++ b/integrationTests/chainSimulator/vm/egldMultiTransfer_test.go @@ -0,0 +1,763 @@ +package vm + +import ( + "encoding/hex" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee" + "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/vm" + "github.com/stretchr/testify/require" +) + +func TestChainSimulator_EGLD_MultiTransfer(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpoch := uint32(4) + + baseIssuingCost := "1000" + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, 
+ TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.EGLDInMultiTransferEnableEpoch = activationEpoch + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch)) + require.Nil(t, err) + + // issue metaESDT + metaESDTTicker := []byte("METATICKER") + nonce := uint64(0) + tx := issueMetaESDTTx(nonce, addrs[0].Bytes, metaESDTTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + metaESDTTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], metaESDTTokenID, roles) + nonce++ + + log.Info("Issued metaESDT token id", "tokenID", string(metaESDTTokenID)) + + // issue NFT + nftTicker := []byte("NFTTICKER") + tx = issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + // issue SFT + sftTicker := []byte("SFTTICKER") + tx = issueSemiFungibleTx(nonce, addrs[0].Bytes, sftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + sftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], sftTokenID, roles) + nonce++ + + log.Info("Issued SFT token id", "tokenID", string(sftTokenID)) + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + sftMetaData := txsFee.GetDefaultMetaData() + sftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + esdtMetaData := txsFee.GetDefaultMetaData() + esdtMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tokenIDs := [][]byte{ + nftTokenID, + sftTokenID, + metaESDTTokenID, + } + + tokensMetadata := []*txsFee.MetaData{ + nftMetaData, + sftMetaData, + esdtMetaData, + } + + for i := range tokenIDs { + tx = nftCreateTx(nonce, addrs[0].Bytes, tokenIDs[i], tokensMetadata[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + fmt.Println(txResult) + fmt.Println(string(txResult.Logs.Events[0].Topics[0])) + fmt.Println(string(txResult.Logs.Events[0].Topics[1])) + + require.Equal(t, "success", 
txResult.Status.String()) + + nonce++ + } + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + account0, err := cs.GetAccount(addrs[0]) + require.Nil(t, err) + + beforeBalanceStr0 := account0.Balance + + account1, err := cs.GetAccount(addrs[1]) + require.Nil(t, err) + + beforeBalanceStr1 := account1.Balance + + egldValue := oneEGLD.Mul(oneEGLD, big.NewInt(3)) + tx = multiESDTNFTTransferWithEGLDTx(nonce, addrs[0].Bytes, addrs[1].Bytes, tokenIDs, egldValue) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + egldLog := string(txResult.Logs.Events[0].Topics[0]) + require.Equal(t, "EGLD-000000", egldLog) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + // check accounts balance + account0, err = cs.GetAccount(addrs[0]) + require.Nil(t, err) + + beforeBalance0, _ := big.NewInt(0).SetString(beforeBalanceStr0, 10) + + expectedBalance0 := big.NewInt(0).Sub(beforeBalance0, egldValue) + txsFee, _ := big.NewInt(0).SetString(txResult.Fee, 10) + expectedBalanceWithFee0 := big.NewInt(0).Sub(expectedBalance0, txsFee) + + require.Equal(t, expectedBalanceWithFee0.String(), account0.Balance) + + account1, err = cs.GetAccount(addrs[1]) + require.Nil(t, err) + + beforeBalance1, _ := big.NewInt(0).SetString(beforeBalanceStr1, 10) + expectedBalance1 := big.NewInt(0).Add(beforeBalance1, egldValue) + + require.Equal(t, expectedBalance1.String(), account1.Balance) +} + +func TestChainSimulator_EGLD_MultiTransfer_Insufficient_Funds(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpoch := uint32(4) + + baseIssuingCost := "1000" + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.EGLDInMultiTransferEnableEpoch = activationEpoch + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch)) + require.Nil(t, err) + + // issue NFT + nftTicker := []byte("NFTTICKER") + nonce := uint64(0) + tx := issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + nftMetaData := txsFee.GetDefaultMetaData() + 
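+// the nftCreateTx call below mints the first token of the collection, which gets nonce 1,
+// so the metadata nonce is pre-set to its hex encoding ("01")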
nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[0].Bytes, nftTokenID, nftMetaData) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + account0, err := cs.GetAccount(addrs[0]) + require.Nil(t, err) + + beforeBalanceStr0 := account0.Balance + + account1, err := cs.GetAccount(addrs[1]) + require.Nil(t, err) + + beforeBalanceStr1 := account1.Balance + + egldValue, _ := big.NewInt(0).SetString(beforeBalanceStr0, 10) + egldValue = egldValue.Add(egldValue, big.NewInt(13)) + tx = multiESDTNFTTransferWithEGLDTx(nonce, addrs[0].Bytes, addrs[1].Bytes, [][]byte{nftTokenID}, egldValue) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.NotEqual(t, "success", txResult.Status.String()) + + eventLog := string(txResult.Logs.Events[0].Topics[1]) + require.Equal(t, "insufficient funds for token EGLD-000000", eventLog) + + // check accounts balance + account0, err = cs.GetAccount(addrs[0]) + require.Nil(t, err) + + beforeBalance0, _ := big.NewInt(0).SetString(beforeBalanceStr0, 10) + + txsFee, _ := big.NewInt(0).SetString(txResult.Fee, 10) + expectedBalanceWithFee0 := big.NewInt(0).Sub(beforeBalance0, txsFee) + + require.Equal(t, expectedBalanceWithFee0.String(), account0.Balance) + + account1, err = cs.GetAccount(addrs[1]) + require.Nil(t, err) + + require.Equal(t, beforeBalanceStr1, account1.Balance) +} + +func TestChainSimulator_EGLD_MultiTransfer_Invalid_Value(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpoch := uint32(4) + + baseIssuingCost := "1000" + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.EGLDInMultiTransferEnableEpoch = activationEpoch + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch)) + require.Nil(t, err) + + // issue NFT + nftTicker := []byte("NFTTICKER") + nonce := uint64(0) + tx := issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, 
addrs[0], nftTokenID, roles) + nonce++ + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[0].Bytes, nftTokenID, nftMetaData) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + account0, err := cs.GetAccount(addrs[0]) + require.Nil(t, err) + + beforeBalanceStr0 := account0.Balance + + account1, err := cs.GetAccount(addrs[1]) + require.Nil(t, err) + + beforeBalanceStr1 := account1.Balance + + egldValue := oneEGLD.Mul(oneEGLD, big.NewInt(3)) + tx = multiESDTNFTTransferWithEGLDTx(nonce, addrs[0].Bytes, addrs[1].Bytes, [][]byte{nftTokenID}, egldValue) + tx.Value = egldValue // invalid value field + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.NotEqual(t, "success", txResult.Status.String()) + + eventLog := string(txResult.Logs.Events[0].Topics[1]) + require.Equal(t, "built in function called with tx value is not allowed", eventLog) + + // check accounts balance + account0, err = cs.GetAccount(addrs[0]) + require.Nil(t, err) + + beforeBalance0, _ := big.NewInt(0).SetString(beforeBalanceStr0, 10) + + txsFee, _ := big.NewInt(0).SetString(txResult.Fee, 10) + expectedBalanceWithFee0 := big.NewInt(0).Sub(beforeBalance0, txsFee) + + require.Equal(t, expectedBalanceWithFee0.String(), account0.Balance) + + account1, err = cs.GetAccount(addrs[1]) + require.Nil(t, err) + + require.Equal(t, beforeBalanceStr1, account1.Balance) +} + +func TestChainSimulator_Multiple_EGLD_Transfers(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpoch := uint32(4) + + baseIssuingCost := "1000" + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.EGLDInMultiTransferEnableEpoch = activationEpoch + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch)) + require.Nil(t, err) + + // issue NFT + nftTicker := []byte("NFTTICKER") + nonce := uint64(0) + tx := issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := 
txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[0].Bytes, nftTokenID, nftMetaData) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + account0, err := cs.GetAccount(addrs[0]) + require.Nil(t, err) + + beforeBalanceStr0 := account0.Balance + + account1, err := cs.GetAccount(addrs[1]) + require.Nil(t, err) + + beforeBalanceStr1 := account1.Balance + + // multi nft transfer with multiple EGLD-000000 tokens + numTransfers := 3 + encodedReceiver := hex.EncodeToString(addrs[1].Bytes) + egldValue := oneEGLD.Mul(oneEGLD, big.NewInt(3)) + + txDataField := []byte(strings.Join( + []string{ + core.BuiltInFunctionMultiESDTNFTTransfer, + encodedReceiver, + hex.EncodeToString(big.NewInt(int64(numTransfers)).Bytes()), + hex.EncodeToString([]byte("EGLD-000000")), + "00", + hex.EncodeToString(egldValue.Bytes()), + hex.EncodeToString(nftTokenID), + hex.EncodeToString(big.NewInt(1).Bytes()), + hex.EncodeToString(big.NewInt(int64(1)).Bytes()), + hex.EncodeToString([]byte("EGLD-000000")), + "00", + hex.EncodeToString(egldValue.Bytes()), + }, "@"), + ) + + tx = &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: addrs[0].Bytes, + GasLimit: 10_000_000, + GasPrice: minGasPrice, + Data: txDataField, + Value: big.NewInt(0), + Version: 1, + Signature: []byte("dummySig"), + ChainID: []byte(configs.ChainID), + } + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + // check accounts balance + account0, err = cs.GetAccount(addrs[0]) + require.Nil(t, err) + + beforeBalance0, _ := big.NewInt(0).SetString(beforeBalanceStr0, 10) + + expectedBalance0 := big.NewInt(0).Sub(beforeBalance0, egldValue) + expectedBalance0 = big.NewInt(0).Sub(expectedBalance0, egldValue) + txsFee, _ := big.NewInt(0).SetString(txResult.Fee, 10) + expectedBalanceWithFee0 := big.NewInt(0).Sub(expectedBalance0, txsFee) + + require.Equal(t, expectedBalanceWithFee0.String(), account0.Balance) + + account1, err = cs.GetAccount(addrs[1]) + require.Nil(t, err) + + beforeBalance1, _ := big.NewInt(0).SetString(beforeBalanceStr1, 10) + expectedBalance1 := big.NewInt(0).Add(beforeBalance1, egldValue) + expectedBalance1 = big.NewInt(0).Add(expectedBalance1, egldValue) + + require.Equal(t, expectedBalance1.String(), account1.Balance) +} + +func multiESDTNFTTransferWithEGLDTx(nonce uint64, sndAdr, rcvAddr []byte, tokens [][]byte, egldValue *big.Int) *transaction.Transaction { + transferData := make([]*utils.TransferESDTData, 0) + + for _, tokenID := range tokens { + transferData = append(transferData, &utils.TransferESDTData{ + Token: tokenID, + Nonce: 1, + Value: big.NewInt(1), + }) + } + + numTransfers := len(tokens) + encodedReceiver := hex.EncodeToString(rcvAddr) + hexEncodedNumTransfers := hex.EncodeToString(big.NewInt(int64(numTransfers)).Bytes()) + hexEncodedEGLD := 
hex.EncodeToString([]byte("EGLD-000000")) + hexEncodedEGLDNonce := "00" + + txDataField := []byte(strings.Join( + []string{ + core.BuiltInFunctionMultiESDTNFTTransfer, + encodedReceiver, + hexEncodedNumTransfers, + hexEncodedEGLD, + hexEncodedEGLDNonce, + hex.EncodeToString(egldValue.Bytes()), + }, "@"), + ) + + for _, td := range transferData { + hexEncodedToken := hex.EncodeToString(td.Token) + esdtValueEncoded := hex.EncodeToString(td.Value.Bytes()) + hexEncodedNonce := "00" + if td.Nonce != 0 { + hexEncodedNonce = hex.EncodeToString(big.NewInt(int64(td.Nonce)).Bytes()) + } + + txDataField = []byte(strings.Join([]string{string(txDataField), hexEncodedToken, hexEncodedNonce, esdtValueEncoded}, "@")) + } + + tx := &transaction.Transaction{ + Nonce: nonce, + SndAddr: sndAdr, + RcvAddr: sndAdr, + GasLimit: 10_000_000, + GasPrice: minGasPrice, + Data: txDataField, + Value: big.NewInt(0), + Version: 1, + Signature: []byte("dummySig"), + ChainID: []byte(configs.ChainID), + } + + return tx +} + +func TestChainSimulator_IssueToken_EGLDTicker(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpoch := uint32(4) + + baseIssuingCost := "1000" + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.EGLDInMultiTransferEnableEpoch = activationEpoch + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch) - 1) + require.Nil(t, err) + + log.Info("Initial setup: Issue token (before the activation of EGLDInMultiTransferFlag)") + + // issue NFT + nftTicker := []byte("EGLD") + nonce := uint64(0) + tx := issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[0].Bytes, nftTokenID, nftMetaData) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch)) + require.Nil(t, err) + + err = 
cs.GenerateBlocks(10) + require.Nil(t, err) + + log.Info("Issue token (after activation of EGLDInMultiTransferFlag)") + + // should fail issuing token with EGLD ticker + tx = issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + errMessage := string(txResult.Logs.Events[0].Topics[1]) + require.Equal(t, vm.ErrCouldNotCreateNewTokenIdentifier.Error(), errMessage) + + require.Equal(t, "success", txResult.Status.String()) +} diff --git a/integrationTests/chainSimulator/vm/esdtImprovements_test.go b/integrationTests/chainSimulator/vm/esdtImprovements_test.go new file mode 100644 index 00000000000..438660658f3 --- /dev/null +++ b/integrationTests/chainSimulator/vm/esdtImprovements_test.go @@ -0,0 +1,3916 @@ +package vm + +import ( + "bytes" + "encoding/hex" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/esdt" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + testsChainSimulator "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee" + "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/vm" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/require" +) + +const ( + defaultPathToInitialConfig = "../../../cmd/node/config/" + + minGasPrice = 1000000000 + maxNumOfBlockToGenerateWhenExecutingTx = 7 +) + +var oneEGLD = big.NewInt(1000000000000000000) + +var log = logger.GetOrCreate("integrationTests/chainSimulator/vm") + +// Test scenario #1 +// +// Initial setup: Create fungible, NFT, SFT and metaESDT tokens +// (before the activation of DynamicEsdtFlag) +// +// 1.check that the metadata for all tokens is saved on the system account +// 2. wait for DynamicEsdtFlag activation +// 3. transfer the tokens to another account +// 4. check that the metadata for all tokens is saved on the system account +// 5. make an updateTokenID@tokenID function call on the ESDTSystem SC for all token types +// 6. check that the metadata for all tokens is saved on the system account +// 7. transfer the tokens to another account +// 8. check that the metaData for the NFT was removed from the system account and moved to the user account +// 9. check that the metaData for the rest of the tokens is still present on the system account and not on the userAccount +// 10. 
do the test for both intra and cross shard txs +func TestChainSimulator_CheckTokensMetadata_TransferTokens(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("transfer and check all tokens - intra shard", func(t *testing.T) { + transferAndCheckTokensMetaData(t, false, false) + }) + + t.Run("transfer and check all tokens - intra shard - multi transfer", func(t *testing.T) { + transferAndCheckTokensMetaData(t, false, true) + }) + + t.Run("transfer and check all tokens - cross shard", func(t *testing.T) { + transferAndCheckTokensMetaData(t, true, false) + }) + + t.Run("transfer and check all tokens - cross shard - multi transfer", func(t *testing.T) { + transferAndCheckTokensMetaData(t, true, true) + }) +} + +func transferAndCheckTokensMetaData(t *testing.T, isCrossShard bool, isMultiTransfer bool) { + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpoch := uint32(4) + + baseIssuingCost := "1000" + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.DynamicESDTEnableEpoch = activationEpoch + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + addrs := createAddresses(t, cs, isCrossShard) + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch) - 1) + require.Nil(t, err) + + log.Info("Initial setup: Create NFT, SFT and metaESDT tokens (before the activation of DynamicEsdtFlag)") + + // issue metaESDT + metaESDTTicker := []byte("METATICKER") + nonce := uint64(0) + tx := issueMetaESDTTx(nonce, addrs[0].Bytes, metaESDTTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + metaESDTTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], metaESDTTokenID, roles) + nonce++ + + rolesTransfer := [][]byte{[]byte(core.ESDTRoleTransfer)} + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[1].Bytes, metaESDTTokenID, rolesTransfer) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued metaESDT token id", "tokenID", string(metaESDTTokenID)) + + // issue NFT + nftTicker := []byte("NFTTICKER") + tx = issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := 
txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[1].Bytes, nftTokenID, rolesTransfer) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + // issue SFT + sftTicker := []byte("SFTTICKER") + tx = issueSemiFungibleTx(nonce, addrs[0].Bytes, sftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + sftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], sftTokenID, roles) + nonce++ + + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[1].Bytes, sftTokenID, rolesTransfer) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued SFT token id", "tokenID", string(sftTokenID)) + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + sftMetaData := txsFee.GetDefaultMetaData() + sftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + esdtMetaData := txsFee.GetDefaultMetaData() + esdtMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tokenIDs := [][]byte{ + nftTokenID, + sftTokenID, + metaESDTTokenID, + } + + tokensMetadata := []*txsFee.MetaData{ + nftMetaData, + sftMetaData, + esdtMetaData, + } + + for i := range tokenIDs { + tx = nftCreateTx(nonce, addrs[0].Bytes, tokenIDs[i], tokensMetadata[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + log.Info("Step 1. check that the metadata for all tokens is saved on the system account") + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) + checkMetaData(t, cs, core.SystemAccountAddress, sftTokenID, shardID, sftMetaData) + checkMetaData(t, cs, core.SystemAccountAddress, metaESDTTokenID, shardID, esdtMetaData) + + log.Info("Step 2. wait for DynamicEsdtFlag activation") + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch)) + require.Nil(t, err) + + log.Info("Step 3. 
transfer the tokens to another account") + + if isMultiTransfer { + tx = multiESDTNFTTransferTx(nonce, addrs[0].Bytes, addrs[1].Bytes, tokenIDs) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } else { + for _, tokenID := range tokenIDs { + log.Info("transfering token id", "tokenID", tokenID) + + tx = esdtNFTTransferTx(nonce, addrs[0].Bytes, addrs[1].Bytes, tokenID) + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + } + + log.Info("Step 4. check that the metadata for all tokens is saved on the system account") + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + shardID = cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[1].Bytes) + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) + checkMetaData(t, cs, core.SystemAccountAddress, sftTokenID, shardID, sftMetaData) + checkMetaData(t, cs, core.SystemAccountAddress, metaESDTTokenID, shardID, esdtMetaData) + + log.Info("Step 5. make an updateTokenID@tokenID function call on the ESDTSystem SC for all token types") + + for _, tokenID := range tokenIDs { + tx = updateTokenIDTx(nonce, addrs[0].Bytes, tokenID) + + log.Info("updating token id", "tokenID", tokenID) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + log.Info("Step 6. check that the metadata for all tokens is saved on the system account") + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) + checkMetaData(t, cs, core.SystemAccountAddress, sftTokenID, shardID, sftMetaData) + checkMetaData(t, cs, core.SystemAccountAddress, metaESDTTokenID, shardID, esdtMetaData) + + log.Info("Step 7. transfer the tokens to another account") + + nonce = uint64(0) + if isMultiTransfer { + tx = multiESDTNFTTransferTx(nonce, addrs[1].Bytes, addrs[2].Bytes, tokenIDs) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + } else { + for _, tokenID := range tokenIDs { + log.Info("transfering token id", "tokenID", tokenID) + + tx = esdtNFTTransferTx(nonce, addrs[1].Bytes, addrs[2].Bytes, tokenID) + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + } + + log.Info("Step 8. check that the metaData for the NFT was removed from the system account and moved to the user account") + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + shardID = cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[2].Bytes) + + checkMetaData(t, cs, addrs[2].Bytes, nftTokenID, shardID, nftMetaData) + checkMetaDataNotInAcc(t, cs, core.SystemAccountAddress, nftTokenID, shardID) + + log.Info("Step 9. 
check that the metaData for the rest of the tokens is still present on the system account and not on the userAccount") + + checkMetaData(t, cs, core.SystemAccountAddress, sftTokenID, shardID, sftMetaData) + checkMetaDataNotInAcc(t, cs, addrs[2].Bytes, sftTokenID, shardID) + + checkMetaData(t, cs, core.SystemAccountAddress, metaESDTTokenID, shardID, esdtMetaData) + checkMetaDataNotInAcc(t, cs, addrs[2].Bytes, metaESDTTokenID, shardID) +} + +func createAddresses( + t *testing.T, + cs testsChainSimulator.ChainSimulator, + isCrossShard bool, +) []dtos.WalletAddress { + var shardIDs []uint32 + if !isCrossShard { + shardIDs = []uint32{1, 1, 1} + } else { + shardIDs = []uint32{0, 1, 2} + } + + mintValue := big.NewInt(10) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + address, err := cs.GenerateAndMintWalletAddress(shardIDs[0], mintValue) + require.Nil(t, err) + + address2, err := cs.GenerateAndMintWalletAddress(shardIDs[1], mintValue) + require.Nil(t, err) + + address3, err := cs.GenerateAndMintWalletAddress(shardIDs[2], mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + return []dtos.WalletAddress{address, address2, address3} +} + +func checkMetaData( + t *testing.T, + cs testsChainSimulator.ChainSimulator, + addressBytes []byte, + token []byte, + shardID uint32, + expectedMetaData *txsFee.MetaData, +) { + retrievedMetaData := getMetaDataFromAcc(t, cs, addressBytes, token, shardID) + + require.Equal(t, expectedMetaData.Nonce, []byte(hex.EncodeToString(big.NewInt(int64(retrievedMetaData.Nonce)).Bytes()))) + require.Equal(t, expectedMetaData.Name, []byte(hex.EncodeToString(retrievedMetaData.Name))) + require.Equal(t, expectedMetaData.Royalties, []byte(hex.EncodeToString(big.NewInt(int64(retrievedMetaData.Royalties)).Bytes()))) + require.Equal(t, expectedMetaData.Hash, []byte(hex.EncodeToString(retrievedMetaData.Hash))) + for i, uri := range expectedMetaData.Uris { + require.Equal(t, uri, []byte(hex.EncodeToString(retrievedMetaData.URIs[i]))) + } + require.Equal(t, expectedMetaData.Attributes, []byte(hex.EncodeToString(retrievedMetaData.Attributes))) +} + +func checkMetaDataNotInAcc( + t *testing.T, + cs testsChainSimulator.ChainSimulator, + addressBytes []byte, + token []byte, + shardID uint32, +) { + esdtData := getESDTDataFromAcc(t, cs, addressBytes, token, shardID) + + require.Nil(t, esdtData.TokenMetaData) +} + +func multiESDTNFTTransferTx(nonce uint64, sndAdr, rcvAddr []byte, tokens [][]byte) *transaction.Transaction { + transferData := make([]*utils.TransferESDTData, 0) + + for _, tokenID := range tokens { + transferData = append(transferData, &utils.TransferESDTData{ + Token: tokenID, + Nonce: 1, + Value: big.NewInt(1), + }) + } + + tx := utils.CreateMultiTransferTX( + nonce, + sndAdr, + rcvAddr, + minGasPrice, + 10_000_000, + transferData..., + ) + tx.Version = 1 + tx.Signature = []byte("dummySig") + tx.ChainID = []byte(configs.ChainID) + + return tx +} + +func esdtNFTTransferTx(nonce uint64, sndAdr, rcvAddr, token []byte) *transaction.Transaction { + tx := utils.CreateESDTNFTTransferTx( + nonce, + sndAdr, + rcvAddr, + token, + 1, + big.NewInt(1), + minGasPrice, + 10_000_000, + "", + ) + tx.Version = 1 + tx.Signature = []byte("dummySig") + tx.ChainID = []byte(configs.ChainID) + + return tx +} + +func issueTx(nonce uint64, sndAdr []byte, ticker []byte, baseIssuingCost string) *transaction.Transaction { + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + txDataField := bytes.Join( + [][]byte{ + []byte("issue"), + 
[]byte(hex.EncodeToString([]byte("asdname1"))), + []byte(hex.EncodeToString(ticker)), + []byte(hex.EncodeToString(big.NewInt(10).Bytes())), + []byte(hex.EncodeToString(big.NewInt(10).Bytes())), + }, + []byte("@"), + ) + + return &transaction.Transaction{ + Nonce: nonce, + SndAddr: sndAdr, + RcvAddr: core.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } +} + +func issueMetaESDTTx(nonce uint64, sndAdr []byte, ticker []byte, baseIssuingCost string) *transaction.Transaction { + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + txDataField := bytes.Join( + [][]byte{ + []byte("registerMetaESDT"), + []byte(hex.EncodeToString([]byte("asdname"))), + []byte(hex.EncodeToString(ticker)), + []byte(hex.EncodeToString(big.NewInt(10).Bytes())), + }, + []byte("@"), + ) + + return &transaction.Transaction{ + Nonce: nonce, + SndAddr: sndAdr, + RcvAddr: core.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } +} + +func issueNonFungibleTx(nonce uint64, sndAdr []byte, ticker []byte, baseIssuingCost string) *transaction.Transaction { + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + txDataField := bytes.Join( + [][]byte{ + []byte("issueNonFungible"), + []byte(hex.EncodeToString([]byte("asdname"))), + []byte(hex.EncodeToString(ticker)), + }, + []byte("@"), + ) + + return &transaction.Transaction{ + Nonce: nonce, + SndAddr: sndAdr, + RcvAddr: core.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } +} + +func issueSemiFungibleTx(nonce uint64, sndAdr []byte, ticker []byte, baseIssuingCost string) *transaction.Transaction { + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + txDataField := bytes.Join( + [][]byte{ + []byte("issueSemiFungible"), + []byte(hex.EncodeToString([]byte("asdname"))), + []byte(hex.EncodeToString(ticker)), + }, + []byte("@"), + ) + + return &transaction.Transaction{ + Nonce: nonce, + SndAddr: sndAdr, + RcvAddr: core.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } +} + +func changeToDynamicTx(nonce uint64, sndAdr []byte, tokenID []byte) *transaction.Transaction { + txDataField := []byte("changeToDynamic@" + hex.EncodeToString(tokenID)) + + return &transaction.Transaction{ + Nonce: nonce, + SndAddr: sndAdr, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: big.NewInt(0), + ChainID: []byte(configs.ChainID), + Version: 1, + } +} + +func updateTokenIDTx(nonce uint64, sndAdr []byte, tokenID []byte) *transaction.Transaction { + txDataField := []byte("updateTokenID@" + hex.EncodeToString(tokenID)) + + return &transaction.Transaction{ + Nonce: nonce, + SndAddr: sndAdr, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: big.NewInt(0), + ChainID: []byte(configs.ChainID), + Version: 1, + } +} + +func nftCreateTx( + nonce uint64, + sndAdr []byte, + tokenID []byte, + metaData *txsFee.MetaData, +) *transaction.Transaction 
{ + txDataField := bytes.Join( + [][]byte{ + []byte(core.BuiltInFunctionESDTNFTCreate), + []byte(hex.EncodeToString(tokenID)), + []byte(hex.EncodeToString(big.NewInt(1).Bytes())), // quantity + metaData.Name, + []byte(hex.EncodeToString(big.NewInt(10).Bytes())), + metaData.Hash, + metaData.Attributes, + metaData.Uris[0], + metaData.Uris[1], + metaData.Uris[2], + }, + []byte("@"), + ) + + return &transaction.Transaction{ + Nonce: nonce, + SndAddr: sndAdr, + RcvAddr: sndAdr, + GasLimit: 10_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: big.NewInt(0), + ChainID: []byte(configs.ChainID), + Version: 1, + } +} + +func modifyCreatorTx( + nonce uint64, + sndAdr []byte, + tokenID []byte, +) *transaction.Transaction { + txDataField := bytes.Join( + [][]byte{ + []byte(core.ESDTModifyCreator), + []byte(hex.EncodeToString(tokenID)), + []byte(hex.EncodeToString(big.NewInt(1).Bytes())), + }, + []byte("@"), + ) + + return &transaction.Transaction{ + Nonce: nonce, + SndAddr: sndAdr, + RcvAddr: sndAdr, + GasLimit: 10_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: big.NewInt(0), + ChainID: []byte(configs.ChainID), + Version: 1, + } +} + +func getESDTDataFromAcc( + t *testing.T, + cs testsChainSimulator.ChainSimulator, + addressBytes []byte, + token []byte, + shardID uint32, +) *esdt.ESDigitalToken { + account, err := cs.GetNodeHandler(shardID).GetStateComponents().AccountsAdapter().LoadAccount(addressBytes) + require.Nil(t, err) + userAccount, ok := account.(state.UserAccountHandler) + require.True(t, ok) + + baseEsdtKeyPrefix := core.ProtectedKeyPrefix + core.ESDTKeyIdentifier + key := append([]byte(baseEsdtKeyPrefix), token...) + + key2 := append(key, big.NewInt(0).SetUint64(1).Bytes()...) 
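+	// The storage key built above is <protected key prefix> + <ESDT key identifier> + <token ID> + <nonce bytes>;
+	// the nonce is hardcoded to 1 because the tests in this file always create and inspect the first instance (nonce 1).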
+ esdtDataBytes, _, err := userAccount.RetrieveValue(key2) + require.Nil(t, err) + + esdtData := &esdt.ESDigitalToken{} + err = cs.GetNodeHandler(shardID).GetCoreComponents().InternalMarshalizer().Unmarshal(esdtData, esdtDataBytes) + require.Nil(t, err) + + return esdtData +} + +func getMetaDataFromAcc( + t *testing.T, + cs testsChainSimulator.ChainSimulator, + addressBytes []byte, + token []byte, + shardID uint32, +) *esdt.MetaData { + esdtData := getESDTDataFromAcc(t, cs, addressBytes, token, shardID) + + require.NotNil(t, esdtData.TokenMetaData) + + return esdtData.TokenMetaData +} + +func setSpecialRoleTx( + nonce uint64, + sndAddr []byte, + address []byte, + token []byte, + roles [][]byte, +) *transaction.Transaction { + txDataBytes := [][]byte{ + []byte("setSpecialRole"), + []byte(hex.EncodeToString(token)), + []byte(hex.EncodeToString(address)), + } + + for _, role := range roles { + txDataBytes = append(txDataBytes, []byte(hex.EncodeToString(role))) + } + + txDataField := bytes.Join( + txDataBytes, + []byte("@"), + ) + + return &transaction.Transaction{ + Nonce: nonce, + SndAddr: sndAddr, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 60_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: big.NewInt(0), + ChainID: []byte(configs.ChainID), + Version: 1, + } +} + +func setAddressEsdtRoles( + t *testing.T, + cs testsChainSimulator.ChainSimulator, + nonce uint64, + address dtos.WalletAddress, + token []byte, + roles [][]byte, +) { + tx := setSpecialRoleTx(nonce, address.Bytes, address.Bytes, token, roles) + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) +} + +// Test scenario #3 +// +// Initial setup: Create NFT, SFT and metaESDT tokens +// (after the activation of DynamicEsdtFlag) +// +// 1. check that the metaData for the NFT was saved in the user account and not on the system account +// 2. 
check that the metaData for the other token types is saved on the system account and not at the user account level +func TestChainSimulator_CreateTokensAfterActivation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + log.Info("Initial setup: Create NFT, SFT and metaESDT tokens (after the activation of DynamicEsdtFlag)") + + // issue metaESDT + metaESDTTicker := []byte("METATICKER") + nonce := uint64(0) + tx := issueMetaESDTTx(nonce, addrs[0].Bytes, metaESDTTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + metaESDTTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], metaESDTTokenID, roles) + nonce++ + + log.Info("Issued metaESDT token id", "tokenID", string(metaESDTTokenID)) + + // issue NFT + nftTicker := []byte("NFTTICKER") + tx = issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + // issue SFT + sftTicker := []byte("SFTTICKER") + tx = issueSemiFungibleTx(nonce, addrs[0].Bytes, sftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + sftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], sftTokenID, roles) + nonce++ + + log.Info("Issued SFT token id", "tokenID", string(sftTokenID)) + + tokenIDs := [][]byte{ + nftTokenID, + sftTokenID, + metaESDTTokenID, + } + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + sftMetaData := txsFee.GetDefaultMetaData() + sftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + esdtMetaData := txsFee.GetDefaultMetaData() + esdtMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tokensMetadata := []*txsFee.MetaData{ + nftMetaData, + sftMetaData, + esdtMetaData, + } + + for i := range tokenIDs { + tx = nftCreateTx(nonce, addrs[0].Bytes, tokenIDs[i], tokensMetadata[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + log.Info("Step 1. 
check that the metaData for the NFT was saved in the user account and not on the system account") + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + checkMetaData(t, cs, addrs[0].Bytes, nftTokenID, shardID, nftMetaData) + checkMetaDataNotInAcc(t, cs, core.SystemAccountAddress, nftTokenID, shardID) + + log.Info("Step 2. check that the metaData for the other token types is saved on the system account and not at the user account level") + + checkMetaData(t, cs, core.SystemAccountAddress, sftTokenID, shardID, sftMetaData) + checkMetaDataNotInAcc(t, cs, addrs[0].Bytes, sftTokenID, shardID) + + checkMetaData(t, cs, core.SystemAccountAddress, metaESDTTokenID, shardID, esdtMetaData) + checkMetaDataNotInAcc(t, cs, addrs[0].Bytes, metaESDTTokenID, shardID) +} + +// Test scenario #4 +// +// Initial setup: Create NFT, SFT, metaESDT tokens +// +// Call ESDTMetaDataRecreate to rewrite the meta data for the nft +// (The sender must have the ESDTMetaDataRecreate role) +func TestChainSimulator_ESDTMetaDataRecreate(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + log.Info("Initial setup: Create NFT, SFT and metaESDT tokens (after the activation of DynamicEsdtFlag)") + + addrs := createAddresses(t, cs, false) + + // issue metaESDT + metaESDTTicker := []byte("METATICKER") + nonce := uint64(0) + tx := issueMetaESDTTx(nonce, addrs[0].Bytes, metaESDTTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + metaESDTTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + } + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, metaESDTTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued metaESDT token id", "tokenID", string(metaESDTTokenID)) + + // issue NFT + nftTicker := []byte("NFTTICKER") + tx = issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, nftTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + // issue SFT + sftTicker := []byte("SFTTICKER") + tx = issueSemiFungibleTx(nonce, addrs[0].Bytes, sftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + sftTokenID := txResult.Logs.Events[0].Topics[0] + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, 
addrs[0].Bytes, sftTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued SFT token id", "tokenID", string(sftTokenID)) + + tokenIDs := [][]byte{ + nftTokenID, + sftTokenID, + metaESDTTokenID, + } + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + sftMetaData := txsFee.GetDefaultMetaData() + sftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + esdtMetaData := txsFee.GetDefaultMetaData() + esdtMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tokensMetadata := []*txsFee.MetaData{ + nftMetaData, + sftMetaData, + esdtMetaData, + } + + for i := range tokenIDs { + tx = nftCreateTx(nonce, addrs[0].Bytes, tokenIDs[i], tokensMetadata[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + + tx = changeToDynamicTx(nonce, addrs[0].Bytes, tokenIDs[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + + roles := [][]byte{ + []byte(core.ESDTRoleNFTRecreate), + } + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, tokenIDs[i], roles) + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + log.Info("Call ESDTMetaDataRecreate to rewrite the meta data for the nft") + + for i := range tokenIDs { + newMetaData := txsFee.GetDefaultMetaData() + newMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + newMetaData.Name = []byte(hex.EncodeToString([]byte("name2"))) + newMetaData.Hash = []byte(hex.EncodeToString([]byte("hash2"))) + newMetaData.Attributes = []byte(hex.EncodeToString([]byte("attributes2"))) + + txDataField := bytes.Join( + [][]byte{ + []byte(core.ESDTMetaDataRecreate), + []byte(hex.EncodeToString(tokenIDs[i])), + newMetaData.Nonce, + newMetaData.Name, + []byte(hex.EncodeToString(big.NewInt(10).Bytes())), + newMetaData.Hash, + newMetaData.Attributes, + newMetaData.Uris[0], + newMetaData.Uris[1], + newMetaData.Uris[2], + }, + []byte("@"), + ) + + tx = &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: addrs[0].Bytes, + GasLimit: 10_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: big.NewInt(0), + ChainID: []byte(configs.ChainID), + Version: 1, + } + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + if bytes.Equal(tokenIDs[i], tokenIDs[0]) { // nft token + checkMetaData(t, cs, addrs[0].Bytes, tokenIDs[i], shardID, newMetaData) + } else { + checkMetaData(t, cs, core.SystemAccountAddress, tokenIDs[i], shardID, newMetaData) + } + + nonce++ + } +} + 
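+// encodeTxData is a hypothetical helper, included here only as an illustrative sketch and not used by the
+// tests in this file: every system-SC and built-in function call above encodes its transaction data field as
+// the function name followed by the hex-encoded arguments, all joined with "@". The tests build these data
+// fields inline with bytes.Join; a shared helper would look roughly like this.
+func encodeTxData(function string, args ...[]byte) []byte {
+	parts := [][]byte{[]byte(function)}
+	for _, arg := range args {
+		parts = append(parts, []byte(hex.EncodeToString(arg)))
+	}
+
+	return bytes.Join(parts, []byte("@"))
+}
+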
+// Test scenario #5 +// +// Initial setup: Create NFT, SFT, metaESDT tokens +// +// Call ESDTMetaDataUpdate to update some of the meta data parameters +// (The sender must have the ESDTRoleNFTUpdate role) +func TestChainSimulator_ESDTMetaDataUpdate(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + log.Info("Initial setup: Create NFT, SFT and metaESDT tokens (after the activation of DynamicEsdtFlag)") + + addrs := createAddresses(t, cs, false) + + // issue metaESDT + metaESDTTicker := []byte("METATICKER") + nonce := uint64(0) + tx := issueMetaESDTTx(nonce, addrs[0].Bytes, metaESDTTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + metaESDTTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, metaESDTTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued metaESDT token id", "tokenID", string(metaESDTTokenID)) + + // issue NFT + nftTicker := []byte("NFTTICKER") + tx = issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, nftTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + // issue SFT + sftTicker := []byte("SFTTICKER") + tx = issueSemiFungibleTx(nonce, addrs[0].Bytes, sftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + sftTokenID := txResult.Logs.Events[0].Topics[0] + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, sftTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued SFT token id", "tokenID", string(sftTokenID)) + + tokenIDs := [][]byte{ + nftTokenID, + sftTokenID, + metaESDTTokenID, + } + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + sftMetaData := txsFee.GetDefaultMetaData() + sftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + esdtMetaData := txsFee.GetDefaultMetaData() + esdtMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tokensMetadata := 
[]*txsFee.MetaData{ + nftMetaData, + sftMetaData, + esdtMetaData, + } + + for i := range tokenIDs { + tx = nftCreateTx(nonce, addrs[0].Bytes, tokenIDs[i], tokensMetadata[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + + tx = changeToDynamicTx(nonce, addrs[0].Bytes, tokenIDs[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + + roles := [][]byte{ + []byte(core.ESDTRoleNFTUpdate), + } + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, tokenIDs[i], roles) + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + log.Info("Call ESDTMetaDataUpdate to rewrite the meta data for the nft") + + for i := range tokenIDs { + newMetaData := txsFee.GetDefaultMetaData() + newMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + newMetaData.Name = []byte(hex.EncodeToString([]byte("name2"))) + newMetaData.Hash = []byte(hex.EncodeToString([]byte("hash2"))) + newMetaData.Attributes = []byte(hex.EncodeToString([]byte("attributes2"))) + + txDataField := bytes.Join( + [][]byte{ + []byte(core.ESDTMetaDataUpdate), + []byte(hex.EncodeToString(tokenIDs[i])), + newMetaData.Nonce, + newMetaData.Name, + []byte(hex.EncodeToString(big.NewInt(10).Bytes())), + newMetaData.Hash, + newMetaData.Attributes, + newMetaData.Uris[0], + newMetaData.Uris[1], + newMetaData.Uris[2], + }, + []byte("@"), + ) + + tx = &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: addrs[0].Bytes, + GasLimit: 10_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: big.NewInt(0), + ChainID: []byte(configs.ChainID), + Version: 1, + } + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + if bytes.Equal(tokenIDs[i], tokenIDs[0]) { // nft token + checkMetaData(t, cs, addrs[0].Bytes, tokenIDs[i], shardID, newMetaData) + } else { + checkMetaData(t, cs, core.SystemAccountAddress, tokenIDs[i], shardID, newMetaData) + } + + nonce++ + } +} + +// Test scenario #6 +// +// Initial setup: Create NFT, SFT, metaESDT tokens +// +// Call ESDTModifyCreator and check that the creator was modified +// (The sender must have the ESDTRoleModifyCreator role) +func TestChainSimulator_ESDTModifyCreator(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + log.Info("Initial setup: Create NFT, SFT and metaESDT tokens (after the activation of DynamicEsdtFlag). 
Register NFT directly as dynamic") + + addrs := createAddresses(t, cs, false) + + // issue metaESDT + metaESDTTicker := []byte("METATICKER") + nonce := uint64(0) + tx := issueMetaESDTTx(nonce, addrs[1].Bytes, metaESDTTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + metaESDTTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + tx = setSpecialRoleTx(nonce, addrs[1].Bytes, addrs[1].Bytes, metaESDTTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued metaESDT token id", "tokenID", string(metaESDTTokenID)) + + // register dynamic NFT + nftTicker := []byte("NFTTICKER") + nftTokenName := []byte("tokenName") + + txDataField := bytes.Join( + [][]byte{ + []byte("registerDynamic"), + []byte(hex.EncodeToString(nftTokenName)), + []byte(hex.EncodeToString(nftTicker)), + []byte(hex.EncodeToString([]byte("NFT"))), + }, + []byte("@"), + ) + + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + tx = &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[1].Bytes, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + tx = setSpecialRoleTx(nonce, addrs[1].Bytes, addrs[1].Bytes, nftTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + // issue SFT + sftTicker := []byte("SFTTICKER") + tx = issueSemiFungibleTx(nonce, addrs[1].Bytes, sftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + sftTokenID := txResult.Logs.Events[0].Topics[0] + tx = setSpecialRoleTx(nonce, addrs[1].Bytes, addrs[1].Bytes, sftTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued SFT token id", "tokenID", string(sftTokenID)) + + tokenIDs := [][]byte{ + nftTokenID, + sftTokenID, + metaESDTTokenID, + } + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + sftMetaData := txsFee.GetDefaultMetaData() + sftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + esdtMetaData := txsFee.GetDefaultMetaData() + esdtMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + 
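+	// Create one instance (nonce 1) of each token with the default metadata; the tokens are then switched
+	// to the DYNAMIC type before ESDTModifyCreator is exercised with a freshly funded creator address.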
tokensMetadata := []*txsFee.MetaData{ + nftMetaData, + sftMetaData, + esdtMetaData, + } + + for i := range tokenIDs { + tx = nftCreateTx(nonce, addrs[1].Bytes, tokenIDs[i], tokensMetadata[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + log.Info("Change to DYNAMIC type") + + for i := range tokenIDs { + tx = changeToDynamicTx(nonce, addrs[1].Bytes, tokenIDs[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + log.Info("Call ESDTModifyCreator and check that the creator was modified") + + mintValue := big.NewInt(10) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[1].Bytes) + + for i := range tokenIDs { + log.Info("Modify creator for token", "tokenID", tokenIDs[i]) + + newCreatorAddress, err := cs.GenerateAndMintWalletAddress(shardID, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + roles = [][]byte{ + []byte(core.ESDTRoleModifyCreator), + } + tx = setSpecialRoleTx(nonce, addrs[1].Bytes, newCreatorAddress.Bytes, tokenIDs[i], roles) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + tx = modifyCreatorTx(0, newCreatorAddress.Bytes, tokenIDs[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + retrievedMetaData := getMetaDataFromAcc(t, cs, core.SystemAccountAddress, tokenIDs[i], shardID) + + require.Equal(t, newCreatorAddress.Bytes, retrievedMetaData.Creator) + } +} + +func TestChainSimulator_ESDTModifyCreator_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + // issue metaESDT + metaESDTTicker := []byte("METATICKER") + nonce := uint64(0) + tx := issueMetaESDTTx(nonce, addrs[1].Bytes, metaESDTTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + metaESDTTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + tx = setSpecialRoleTx(nonce, addrs[1].Bytes, addrs[1].Bytes, metaESDTTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued metaESDT token id", "tokenID", string(metaESDTTokenID)) + + // register dynamic NFT + nftTicker := []byte("NFTTICKER") + nftTokenName := []byte("tokenName") + + txDataField := bytes.Join( + [][]byte{ + []byte("registerDynamic"), + 
[]byte(hex.EncodeToString(nftTokenName)), + []byte(hex.EncodeToString(nftTicker)), + []byte(hex.EncodeToString([]byte("NFT"))), + }, + []byte("@"), + ) + + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + tx = &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[1].Bytes, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + tx = setSpecialRoleTx(nonce, addrs[1].Bytes, addrs[1].Bytes, nftTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + // issue SFT + sftTicker := []byte("SFTTICKER") + tx = issueSemiFungibleTx(nonce, addrs[1].Bytes, sftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + sftTokenID := txResult.Logs.Events[0].Topics[0] + tx = setSpecialRoleTx(nonce, addrs[1].Bytes, addrs[1].Bytes, sftTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued SFT token id", "tokenID", string(sftTokenID)) + + tokenIDs := [][]byte{ + nftTokenID, + sftTokenID, + metaESDTTokenID, + } + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + sftMetaData := txsFee.GetDefaultMetaData() + sftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + esdtMetaData := txsFee.GetDefaultMetaData() + esdtMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tokensMetadata := []*txsFee.MetaData{ + nftMetaData, + sftMetaData, + esdtMetaData, + } + + for i := range tokenIDs { + tx = nftCreateTx(nonce, addrs[1].Bytes, tokenIDs[i], tokensMetadata[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + log.Info("Change to DYNAMIC type") + + for i := range tokenIDs { + tx = changeToDynamicTx(nonce, addrs[1].Bytes, tokenIDs[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + log.Info("Call ESDTModifyCreator and check that the creator was modified") + + mintValue := big.NewInt(10) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[1].Bytes) + + crossShardID := uint32(2) + if shardID == uint32(2) { + crossShardID = uint32(1) + } + + for i := range tokenIDs { + log.Info("Modify 
creator for token", "tokenID", string(tokenIDs[i])) + + newCreatorAddress, err := cs.GenerateAndMintWalletAddress(crossShardID, mintValue) + require.Nil(t, err) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + roles = [][]byte{ + []byte(core.ESDTRoleModifyCreator), + } + tx = setSpecialRoleTx(nonce, addrs[1].Bytes, newCreatorAddress.Bytes, tokenIDs[i], roles) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("transfering token id", "tokenID", tokenIDs[i]) + + tx = esdtNFTTransferTx(nonce, addrs[1].Bytes, newCreatorAddress.Bytes, tokenIDs[i]) + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + tx = modifyCreatorTx(0, newCreatorAddress.Bytes, tokenIDs[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(newCreatorAddress.Bytes) + retrievedMetaData := getMetaDataFromAcc(t, cs, core.SystemAccountAddress, tokenIDs[i], shardID) + + require.Equal(t, newCreatorAddress.Bytes, retrievedMetaData.Creator) + + nonce++ + } +} + +// Test scenario #7 +// +// Initial setup: Create NFT, SFT, metaESDT tokens +// +// Call ESDTSetNewURIs and check that the new URIs were set for the token +// (The sender must have the ESDTRoleSetNewURI role) +func TestChainSimulator_ESDTSetNewURIs(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + // issue metaESDT + metaESDTTicker := []byte("METATICKER") + nonce := uint64(0) + tx := issueMetaESDTTx(nonce, addrs[0].Bytes, metaESDTTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + metaESDTTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, metaESDTTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued metaESDT token id", "tokenID", string(metaESDTTokenID)) + + // issue NFT + nftTicker := []byte("NFTTICKER") + tx = issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, nftTokenID, roles) + nonce++ + + txResult, err = 
cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + // issue SFT + sftTicker := []byte("SFTTICKER") + tx = issueSemiFungibleTx(nonce, addrs[0].Bytes, sftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + sftTokenID := txResult.Logs.Events[0].Topics[0] + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, sftTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued SFT token id", "tokenID", string(sftTokenID)) + + tokenIDs := [][]byte{ + nftTokenID, + sftTokenID, + metaESDTTokenID, + } + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + sftMetaData := txsFee.GetDefaultMetaData() + sftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + esdtMetaData := txsFee.GetDefaultMetaData() + esdtMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tokensMetadata := []*txsFee.MetaData{ + nftMetaData, + sftMetaData, + esdtMetaData, + } + + for i := range tokenIDs { + tx = nftCreateTx(nonce, addrs[0].Bytes, tokenIDs[i], tokensMetadata[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + + tx = changeToDynamicTx(nonce, addrs[0].Bytes, tokenIDs[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + + roles := [][]byte{ + []byte(core.ESDTRoleSetNewURI), + } + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, tokenIDs[i], roles) + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + log.Info("Call ESDTSetNewURIs and check that the new URIs were set for the tokens") + + metaDataNonce := []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + uris := [][]byte{ + []byte(hex.EncodeToString([]byte("uri0"))), + []byte(hex.EncodeToString([]byte("uri1"))), + []byte(hex.EncodeToString([]byte("uri2"))), + } + + expUris := [][]byte{ + []byte("uri0"), + []byte("uri1"), + []byte("uri2"), + } + + for i := range tokenIDs { + log.Info("Set new uris for token", "tokenID", string(tokenIDs[i])) + + txDataField := bytes.Join( + [][]byte{ + []byte(core.ESDTSetNewURIs), + []byte(hex.EncodeToString(tokenIDs[i])), + metaDataNonce, + uris[0], + uris[1], + uris[2], + }, + []byte("@"), + ) + + tx = &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: addrs[0].Bytes, + GasLimit: 10_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: big.NewInt(0), + ChainID: []byte(configs.ChainID), + Version: 1, + } + + txResult, err 
= cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + var retrievedMetaData *esdt.MetaData + if bytes.Equal(tokenIDs[i], tokenIDs[0]) { // nft token + retrievedMetaData = getMetaDataFromAcc(t, cs, addrs[0].Bytes, tokenIDs[i], shardID) + } else { + retrievedMetaData = getMetaDataFromAcc(t, cs, core.SystemAccountAddress, tokenIDs[i], shardID) + } + + require.Equal(t, expUris, retrievedMetaData.URIs) + + nonce++ + } +} + +// Test scenario #8 +// +// Initial setup: Create NFT, SFT, metaESDT tokens +// +// Call ESDTModifyRoyalties and check that the royalties were changed +// (The sender must have the ESDTRoleModifyRoyalties role) +func TestChainSimulator_ESDTModifyRoyalties(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + // issue metaESDT + metaESDTTicker := []byte("METATICKER") + nonce := uint64(0) + tx := issueMetaESDTTx(nonce, addrs[0].Bytes, metaESDTTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + metaESDTTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, metaESDTTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued metaESDT token id", "tokenID", string(metaESDTTokenID)) + + // issue NFT + nftTicker := []byte("NFTTICKER") + tx = issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, nftTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + // issue SFT + sftTicker := []byte("SFTTICKER") + tx = issueSemiFungibleTx(nonce, addrs[0].Bytes, sftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + sftTokenID := txResult.Logs.Events[0].Topics[0] + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, sftTokenID, roles) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, 
"success", txResult.Status.String()) + + log.Info("Issued SFT token id", "tokenID", string(sftTokenID)) + + tokenIDs := [][]byte{ + nftTokenID, + sftTokenID, + metaESDTTokenID, + } + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + sftMetaData := txsFee.GetDefaultMetaData() + sftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + esdtMetaData := txsFee.GetDefaultMetaData() + esdtMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tokensMetadata := []*txsFee.MetaData{ + nftMetaData, + sftMetaData, + esdtMetaData, + } + + for i := range tokenIDs { + tx = nftCreateTx(nonce, addrs[0].Bytes, tokenIDs[i], tokensMetadata[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + + tx = changeToDynamicTx(nonce, addrs[0].Bytes, tokenIDs[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + + roles := [][]byte{ + []byte(core.ESDTRoleModifyRoyalties), + } + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[0].Bytes, tokenIDs[i], roles) + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + log.Info("Call ESDTModifyRoyalties and check that the royalties were changed") + + metaDataNonce := []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + royalties := []byte(hex.EncodeToString(big.NewInt(20).Bytes())) + + for i := range tokenIDs { + log.Info("Set new royalties for token", "tokenID", string(tokenIDs[i])) + + txDataField := bytes.Join( + [][]byte{ + []byte(core.ESDTModifyRoyalties), + []byte(hex.EncodeToString(tokenIDs[i])), + metaDataNonce, + royalties, + }, + []byte("@"), + ) + + tx = &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: addrs[0].Bytes, + GasLimit: 10_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: big.NewInt(0), + ChainID: []byte(configs.ChainID), + Version: 1, + } + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + shardID := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(addrs[0].Bytes) + retrievedMetaData := getMetaDataFromAcc(t, cs, addrs[0].Bytes, nftTokenID, shardID) + + require.Equal(t, uint32(big.NewInt(20).Uint64()), retrievedMetaData.Royalties) + + nonce++ + } +} + +// Test scenario #9 +// +// Initial setup: Create NFT +// +// 1. Change the nft to DYNAMIC type - the metadata should be on the system account +// 2. Send the NFT cross shard +// 3. 
The meta data should still be present on the system account +func TestChainSimulator_NFT_ChangeToDynamicType(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpoch := uint32(4) + + baseIssuingCost := "1000" + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.DynamicESDTEnableEpoch = activationEpoch + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + addrs := createAddresses(t, cs, true) + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch) - 2) + require.Nil(t, err) + + log.Info("Initial setup: Create NFT") + + nftTicker := []byte("NFTTICKER") + nonce := uint64(0) + tx := issueNonFungibleTx(nonce, addrs[1].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + } + + nftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[1], nftTokenID, roles) + nonce++ + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[1].Bytes, nftTokenID, nftMetaData) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch)) + require.Nil(t, err) + + log.Info("Step 1. Change the nft to DYNAMIC type - the metadata should be on the system account") + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[1].Bytes) + + tx = changeToDynamicTx(nonce, addrs[1].Bytes, nftTokenID) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + roles = [][]byte{ + []byte(core.ESDTRoleNFTUpdate), + } + + setAddressEsdtRoles(t, cs, nonce, addrs[1], nftTokenID, roles) + nonce++ + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) + + log.Info("Step 2. 
Send the NFT cross shard") + + tx = esdtNFTTransferTx(nonce, addrs[1].Bytes, addrs[2].Bytes, nftTokenID) + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Step 3. The meta data should still be present on the system account") + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) +} + +// Test scenario #10 +// +// Initial setup: Create SFT and send in another shard +// +// 1. change the sft meta data (differently from the previous one) in the other shard +// 2. check that the newest metadata is saved +func TestChainSimulator_ChangeMetaData(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("sft change metadata", func(t *testing.T) { + testChainSimulatorChangeMetaData(t, issueSemiFungibleTx) + }) + + t.Run("metaESDT change metadata", func(t *testing.T) { + testChainSimulatorChangeMetaData(t, issueMetaESDTTx) + }) +} + +type issueTxFunc func(uint64, []byte, []byte, string) *transaction.Transaction + +func testChainSimulatorChangeMetaData(t *testing.T, issueFn issueTxFunc) { + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, true) + + log.Info("Initial setup: Create token and send in another shard") + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + []byte(core.ESDTRoleNFTAddQuantity), + } + + ticker := []byte("TICKER") + nonce := uint64(0) + tx := issueFn(nonce, addrs[1].Bytes, ticker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + tokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[1], tokenID, roles) + nonce++ + + log.Info("Issued token id", "tokenID", string(tokenID)) + + metaData := txsFee.GetDefaultMetaData() + metaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + txDataField := bytes.Join( + [][]byte{ + []byte(core.BuiltInFunctionESDTNFTCreate), + []byte(hex.EncodeToString(tokenID)), + []byte(hex.EncodeToString(big.NewInt(2).Bytes())), // quantity + metaData.Name, + []byte(hex.EncodeToString(big.NewInt(10).Bytes())), + metaData.Hash, + metaData.Attributes, + metaData.Uris[0], + metaData.Uris[1], + metaData.Uris[2], + }, + []byte("@"), + ) + + tx = &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[1].Bytes, + RcvAddr: addrs[1].Bytes, + GasLimit: 10_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: big.NewInt(0), + ChainID: []byte(configs.ChainID), + Version: 1, + } + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + tx = changeToDynamicTx(nonce, addrs[1].Bytes, tokenID) + nonce++ + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Send to separate shards") + + tx = esdtNFTTransferTx(nonce, addrs[1].Bytes, 
addrs[2].Bytes, tokenID) + nonce++ + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + tx = esdtNFTTransferTx(nonce, addrs[1].Bytes, addrs[0].Bytes, tokenID) + nonce++ + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + roles = [][]byte{ + []byte(core.ESDTRoleTransfer), + []byte(core.ESDTRoleNFTUpdate), + } + tx = setSpecialRoleTx(nonce, addrs[1].Bytes, addrs[0].Bytes, tokenID, roles) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + log.Info("Step 1. change the sft meta data in one shard") + + sftMetaData2 := txsFee.GetDefaultMetaData() + sftMetaData2.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + sftMetaData2.Name = []byte(hex.EncodeToString([]byte("name2"))) + sftMetaData2.Hash = []byte(hex.EncodeToString([]byte("hash2"))) + sftMetaData2.Attributes = []byte(hex.EncodeToString([]byte("attributes2"))) + + txDataField = bytes.Join( + [][]byte{ + []byte(core.ESDTMetaDataUpdate), + []byte(hex.EncodeToString(tokenID)), + sftMetaData2.Nonce, + sftMetaData2.Name, + []byte(hex.EncodeToString(big.NewInt(10).Bytes())), + sftMetaData2.Hash, + sftMetaData2.Attributes, + sftMetaData2.Uris[0], + sftMetaData2.Uris[1], + sftMetaData2.Uris[2], + }, + []byte("@"), + ) + + tx = &transaction.Transaction{ + Nonce: 0, + SndAddr: addrs[0].Bytes, + RcvAddr: addrs[0].Bytes, + GasLimit: 10_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: big.NewInt(0), + ChainID: []byte(configs.ChainID), + Version: 1, + } + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("Step 2. 
check that the newest metadata is saved") + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + checkMetaData(t, cs, core.SystemAccountAddress, tokenID, shardID, sftMetaData2) +} + +func TestChainSimulator_NFT_RegisterDynamic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, true) + + log.Info("Register dynamic nft token") + + nftTicker := []byte("NFTTICKER") + nftTokenName := []byte("tokenName") + + txDataField := bytes.Join( + [][]byte{ + []byte("registerDynamic"), + []byte(hex.EncodeToString(nftTokenName)), + []byte(hex.EncodeToString(nftTicker)), + []byte(hex.EncodeToString([]byte("NFT"))), + }, + []byte("@"), + ) + + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + nonce := uint64(0) + tx := &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[0].Bytes, nftTokenID, nftMetaData) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) + + log.Info("Check that token type is Dynamic") + + scQuery := &process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getTokenProperties", + CallValue: big.NewInt(0), + Arguments: [][]byte{nftTokenID}, + } + result, _, err := cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, "", result.ReturnMessage) + require.Equal(t, testsChainSimulator.OkReturnCode, result.ReturnCode) + + tokenType := result.ReturnData[1] + require.Equal(t, core.DynamicNFTESDT, string(tokenType)) +} + +func TestChainSimulator_MetaESDT_RegisterDynamic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, true) + + log.Info("Register dynamic metaESDT token") + + metaTicker := []byte("METATICKER") + metaTokenName := []byte("tokenName") + + decimals := big.NewInt(15) + + txDataField := bytes.Join( + [][]byte{ + []byte("registerDynamic"), + []byte(hex.EncodeToString(metaTokenName)), + []byte(hex.EncodeToString(metaTicker)), + []byte(hex.EncodeToString([]byte("META"))), + 
[]byte(hex.EncodeToString(decimals.Bytes())), + }, + []byte("@"), + ) + + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + nonce := uint64(0) + tx := &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[0].Bytes, nftTokenID, nftMetaData) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) + + log.Info("Check that token type is Dynamic") + + scQuery := &process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getTokenProperties", + CallValue: big.NewInt(0), + Arguments: [][]byte{nftTokenID}, + } + result, _, err := cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, "", result.ReturnMessage) + require.Equal(t, testsChainSimulator.OkReturnCode, result.ReturnCode) + + tokenType := result.ReturnData[1] + require.Equal(t, core.Dynamic+core.MetaESDT, string(tokenType)) +} + +func TestChainSimulator_FNG_RegisterDynamic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, true) + + log.Info("Register dynamic fungible token") + + metaTicker := []byte("FNGTICKER") + metaTokenName := []byte("tokenName") + + decimals := big.NewInt(15) + + txDataField := bytes.Join( + [][]byte{ + []byte("registerDynamic"), + []byte(hex.EncodeToString(metaTokenName)), + []byte(hex.EncodeToString(metaTicker)), + []byte(hex.EncodeToString([]byte("FNG"))), + []byte(hex.EncodeToString(decimals.Bytes())), + }, + []byte("@"), + ) + + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + tx := &transaction.Transaction{ + Nonce: 0, + SndAddr: addrs[0].Bytes, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + signalErrorTopic := string(txResult.Logs.Events[0].Topics[1]) + + require.Equal(t, fmt.Sprintf("cannot create %s tokens as dynamic", core.FungibleESDT), signalErrorTopic) +} + +func 
TestChainSimulator_NFT_RegisterAndSetAllRolesDynamic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, true) + + log.Info("Register dynamic nft token") + + nftTicker := []byte("NFTTICKER") + nftTokenName := []byte("tokenName") + + txDataField := bytes.Join( + [][]byte{ + []byte("registerAndSetAllRolesDynamic"), + []byte(hex.EncodeToString(nftTokenName)), + []byte(hex.EncodeToString(nftTicker)), + []byte(hex.EncodeToString([]byte("NFT"))), + }, + []byte("@"), + ) + + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + nonce := uint64(0) + tx := &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[0].Bytes, nftTokenID, nftMetaData) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) + + log.Info("Check that token type is Dynamic") + + scQuery := &process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getTokenProperties", + CallValue: big.NewInt(0), + Arguments: [][]byte{nftTokenID}, + } + result, _, err := cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, "", result.ReturnMessage) + require.Equal(t, testsChainSimulator.OkReturnCode, result.ReturnCode) + + tokenType := result.ReturnData[1] + require.Equal(t, core.DynamicNFTESDT, string(tokenType)) + + log.Info("Check token roles") + + scQuery = &process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getAllAddressesAndRoles", + CallValue: big.NewInt(0), + Arguments: [][]byte{nftTokenID}, + } + result, _, err = cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, "", result.ReturnMessage) + require.Equal(t, testsChainSimulator.OkReturnCode, result.ReturnCode) + + expectedRoles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleNFTBurn), + []byte(core.ESDTRoleNFTUpdateAttributes), + []byte(core.ESDTRoleNFTAddURI), + []byte(core.ESDTRoleNFTRecreate), + []byte(core.ESDTRoleModifyCreator), + []byte(core.ESDTRoleModifyRoyalties), + []byte(core.ESDTRoleSetNewURI), + []byte(core.ESDTRoleNFTUpdate), + } + + checkTokenRoles(t, result.ReturnData, expectedRoles) +} + +func 
TestChainSimulator_SFT_RegisterAndSetAllRolesDynamic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, true) + + log.Info("Register dynamic sft token") + + sftTicker := []byte("SFTTICKER") + sftTokenName := []byte("tokenName") + + txDataField := bytes.Join( + [][]byte{ + []byte("registerAndSetAllRolesDynamic"), + []byte(hex.EncodeToString(sftTokenName)), + []byte(hex.EncodeToString(sftTicker)), + []byte(hex.EncodeToString([]byte("SFT"))), + }, + []byte("@"), + ) + + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + nonce := uint64(0) + tx := &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + sftTokenID := txResult.Logs.Events[0].Topics[0] + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], sftTokenID, roles) + nonce++ + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[0].Bytes, sftTokenID, nftMetaData) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + checkMetaData(t, cs, core.SystemAccountAddress, sftTokenID, shardID, nftMetaData) + + log.Info("Check that token type is Dynamic") + + scQuery := &process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getTokenProperties", + CallValue: big.NewInt(0), + Arguments: [][]byte{sftTokenID}, + } + result, _, err := cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, "", result.ReturnMessage) + require.Equal(t, testsChainSimulator.OkReturnCode, result.ReturnCode) + + tokenType := result.ReturnData[1] + require.Equal(t, core.DynamicSFTESDT, string(tokenType)) + + log.Info("Check token roles") + + scQuery = &process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getAllAddressesAndRoles", + CallValue: big.NewInt(0), + Arguments: [][]byte{sftTokenID}, + } + result, _, err = cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, "", result.ReturnMessage) + require.Equal(t, testsChainSimulator.OkReturnCode, result.ReturnCode) + + expectedRoles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleNFTBurn), + []byte(core.ESDTRoleNFTUpdateAttributes), + []byte(core.ESDTRoleNFTAddURI), + []byte(core.ESDTRoleNFTRecreate), + []byte(core.ESDTRoleModifyCreator), + []byte(core.ESDTRoleModifyRoyalties), + []byte(core.ESDTRoleSetNewURI), + []byte(core.ESDTRoleNFTUpdate), + } + + checkTokenRoles(t, result.ReturnData, expectedRoles) +} + +func 
TestChainSimulator_FNG_RegisterAndSetAllRolesDynamic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, true) + + log.Info("Register dynamic fungible token") + + fngTicker := []byte("FNGTICKER") + fngTokenName := []byte("tokenName") + + txDataField := bytes.Join( + [][]byte{ + []byte("registerAndSetAllRolesDynamic"), + []byte(hex.EncodeToString(fngTokenName)), + []byte(hex.EncodeToString(fngTicker)), + []byte(hex.EncodeToString([]byte("FNG"))), + []byte(hex.EncodeToString(big.NewInt(10).Bytes())), + }, + []byte("@"), + ) + + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + tx := &transaction.Transaction{ + Nonce: 0, + SndAddr: addrs[0].Bytes, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + signalErrorTopic := string(txResult.Logs.Events[0].Topics[1]) + + require.Equal(t, fmt.Sprintf("cannot create %s tokens as dynamic", core.FungibleESDT), signalErrorTopic) +} + +func TestChainSimulator_MetaESDT_RegisterAndSetAllRolesDynamic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, true) + + log.Info("Register dynamic meta esdt token") + + ticker := []byte("META" + "TICKER") + tokenName := []byte("tokenName") + + decimals := big.NewInt(10) + + txDataField := bytes.Join( + [][]byte{ + []byte("registerAndSetAllRolesDynamic"), + []byte(hex.EncodeToString(tokenName)), + []byte(hex.EncodeToString(ticker)), + []byte(hex.EncodeToString([]byte("META"))), + []byte(hex.EncodeToString(decimals.Bytes())), + }, + []byte("@"), + ) + + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + nonce := uint64(0) + tx := &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + metaTokenID := txResult.Logs.Events[0].Topics[0] + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], metaTokenID, roles) + nonce++ + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[0].Bytes, metaTokenID, nftMetaData) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + checkMetaData(t, 
cs, core.SystemAccountAddress, metaTokenID, shardID, nftMetaData) + + log.Info("Check that token type is Dynamic") + + scQuery := &process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getTokenProperties", + CallValue: big.NewInt(0), + Arguments: [][]byte{metaTokenID}, + } + result, _, err := cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, "", result.ReturnMessage) + require.Equal(t, testsChainSimulator.OkReturnCode, result.ReturnCode) + + tokenType := result.ReturnData[1] + require.Equal(t, core.Dynamic+core.MetaESDT, string(tokenType)) + + log.Info("Check token roles") + + scQuery = &process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getAllAddressesAndRoles", + CallValue: big.NewInt(0), + Arguments: [][]byte{metaTokenID}, + } + result, _, err = cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, "", result.ReturnMessage) + require.Equal(t, testsChainSimulator.OkReturnCode, result.ReturnCode) + + expectedRoles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleNFTBurn), + []byte(core.ESDTRoleNFTAddQuantity), + []byte(core.ESDTRoleNFTUpdateAttributes), + []byte(core.ESDTRoleNFTAddURI), + } + + checkTokenRoles(t, result.ReturnData, expectedRoles) +} + +func checkTokenRoles(t *testing.T, returnData [][]byte, expectedRoles [][]byte) { + for _, expRole := range expectedRoles { + found := false + + for _, item := range returnData { + if bytes.Equal(expRole, item) { + found = true + } + } + + require.True(t, found) + } +} + +func TestChainSimulator_NFTcreatedBeforeSaveToSystemAccountEnabled(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + cs, epochForDynamicNFT := getTestChainSimulatorWithSaveToSystemAccountDisabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + log.Info("Initial setup: Create NFT that will have its metadata saved to the user account") + + nftTicker := []byte("NFTTICKER") + tx := issueNonFungibleTx(0, addrs[0].Bytes, nftTicker, baseIssuingCost) + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + nftTokenID := txResult.Logs.Events[0].Topics[0] + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + createTokenUpdateTokenIDAndTransfer(t, cs, addrs[0].Bytes, addrs[1].Bytes, nftTokenID, nftMetaData, epochForDynamicNFT, addrs[0]) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + checkMetaData(t, cs, addrs[1].Bytes, nftTokenID, shardID, nftMetaData) + checkMetaDataNotInAcc(t, cs, addrs[0].Bytes, nftTokenID, shardID) + checkMetaDataNotInAcc(t, cs, core.SystemAccountAddress, nftTokenID, shardID) +} + +func TestChainSimulator_SFTcreatedBeforeSaveToSystemAccountEnabled(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + cs, epochForDynamicNFT := getTestChainSimulatorWithSaveToSystemAccountDisabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + log.Info("Initial setup: Create SFT that will have its metadata saved to the user account") + + sftTicker :=
[]byte("SFTTICKER") + tx := issueSemiFungibleTx(0, addrs[0].Bytes, sftTicker, baseIssuingCost) + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + sftTokenID := txResult.Logs.Events[0].Topics[0] + + log.Info("Issued SFT token id", "tokenID", string(sftTokenID)) + + metaData := txsFee.GetDefaultMetaData() + metaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + createTokenUpdateTokenIDAndTransfer(t, cs, addrs[0].Bytes, addrs[1].Bytes, sftTokenID, metaData, epochForDynamicNFT, addrs[0]) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + checkMetaData(t, cs, core.SystemAccountAddress, sftTokenID, shardID, metaData) + checkMetaDataNotInAcc(t, cs, addrs[0].Bytes, sftTokenID, shardID) + checkMetaDataNotInAcc(t, cs, addrs[1].Bytes, sftTokenID, shardID) +} + +func TestChainSimulator_MetaESDTCreatedBeforeSaveToSystemAccountEnabled(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + cs, epochForDynamicNFT := getTestChainSimulatorWithSaveToSystemAccountDisabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + log.Info("Initial setup: Create MetaESDT that will have it's metadata saved to the user account") + + metaTicker := []byte("METATICKER") + tx := issueMetaESDTTx(0, addrs[0].Bytes, metaTicker, baseIssuingCost) + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + metaTokenID := txResult.Logs.Events[0].Topics[0] + + log.Info("Issued MetaESDT token id", "tokenID", string(metaTokenID)) + + metaData := txsFee.GetDefaultMetaData() + metaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + createTokenUpdateTokenIDAndTransfer(t, cs, addrs[0].Bytes, addrs[1].Bytes, metaTokenID, metaData, epochForDynamicNFT, addrs[0]) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + checkMetaData(t, cs, core.SystemAccountAddress, metaTokenID, shardID, metaData) + checkMetaDataNotInAcc(t, cs, addrs[0].Bytes, metaTokenID, shardID) + checkMetaDataNotInAcc(t, cs, addrs[1].Bytes, metaTokenID, shardID) +} + +func getTestChainSimulatorWithDynamicNFTEnabled(t *testing.T, baseIssuingCost string) (testsChainSimulator.ChainSimulator, int32) { + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpochForDynamicNFT := uint32(2) + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.DynamicESDTEnableEpoch = activationEpochForDynamicNFT + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + err = 
cs.GenerateBlocksUntilEpochIsReached(int32(activationEpochForDynamicNFT)) + require.Nil(t, err) + + return cs, int32(activationEpochForDynamicNFT) +} + +func getTestChainSimulatorWithSaveToSystemAccountDisabled(t *testing.T, baseIssuingCost string) (testsChainSimulator.ChainSimulator, int32) { + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpochForSaveToSystemAccount := uint32(4) + activationEpochForDynamicNFT := uint32(6) + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.OptimizeNFTStoreEnableEpoch = activationEpochForSaveToSystemAccount + cfg.EpochConfig.EnableEpochs.DynamicESDTEnableEpoch = activationEpochForDynamicNFT + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpochForSaveToSystemAccount) - 2) + require.Nil(t, err) + + return cs, int32(activationEpochForDynamicNFT) +} + +func createTokenUpdateTokenIDAndTransfer( + t *testing.T, + cs testsChainSimulator.ChainSimulator, + originAddress []byte, + targetAddress []byte, + tokenID []byte, + metaData *txsFee.MetaData, + epochForDynamicNFT int32, + walletWithRoles dtos.WalletAddress, +) { + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, 1, walletWithRoles, tokenID, roles) + + tx := nftCreateTx(2, originAddress, tokenID, metaData) + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + log.Info("check that the metadata is saved on the user account") + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(originAddress) + checkMetaData(t, cs, originAddress, tokenID, shardID, metaData) + checkMetaDataNotInAcc(t, cs, core.SystemAccountAddress, tokenID, shardID) + + err = cs.GenerateBlocksUntilEpochIsReached(epochForDynamicNFT) + require.Nil(t, err) + + tx = updateTokenIDTx(3, originAddress, tokenID) + + log.Info("updating token id", "tokenID", tokenID) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + log.Info("transferring token id", "tokenID", tokenID) + + tx = esdtNFTTransferTx(4, originAddress, targetAddress, tokenID) + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) +} + +func TestChainSimulator_ChangeToDynamic_OldTokens(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + 
baseIssuingCost := "1000" + + cs, epochForDynamicNFT := getTestChainSimulatorWithSaveToSystemAccountDisabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + // issue metaESDT + metaESDTTicker := []byte("METATICKER") + nonce := uint64(0) + tx := issueMetaESDTTx(nonce, addrs[0].Bytes, metaESDTTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + metaESDTTokenID := txResult.Logs.Events[0].Topics[0] + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], metaESDTTokenID, roles) + nonce++ + + log.Info("Issued metaESDT token id", "tokenID", string(metaESDTTokenID)) + + // issue NFT + nftTicker := []byte("NFTTICKER") + tx = issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + // issue SFT + sftTicker := []byte("SFTTICKER") + tx = issueSemiFungibleTx(nonce, addrs[0].Bytes, sftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + sftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], sftTokenID, roles) + nonce++ + + log.Info("Issued SFT token id", "tokenID", string(sftTokenID)) + + tokenIDs := [][]byte{ + nftTokenID, + sftTokenID, + metaESDTTokenID, + } + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + sftMetaData := txsFee.GetDefaultMetaData() + sftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + esdtMetaData := txsFee.GetDefaultMetaData() + esdtMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tokensMetadata := []*txsFee.MetaData{ + nftMetaData, + sftMetaData, + esdtMetaData, + } + + for i := range tokenIDs { + tx = nftCreateTx(nonce, addrs[0].Bytes, tokenIDs[i], tokensMetadata[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + // meta data should be saved on account, since it is before `OptimizeNFTStoreEnableEpoch` + checkMetaData(t, cs, addrs[0].Bytes, nftTokenID, shardID, nftMetaData) + checkMetaDataNotInAcc(t, cs, core.SystemAccountAddress, nftTokenID, shardID) + + checkMetaData(t, cs, addrs[0].Bytes, sftTokenID, shardID, sftMetaData) + checkMetaDataNotInAcc(t, cs, core.SystemAccountAddress, sftTokenID, shardID) + + checkMetaData(t, cs, addrs[0].Bytes, metaESDTTokenID, shardID, esdtMetaData) + checkMetaDataNotInAcc(t, cs, core.SystemAccountAddress, metaESDTTokenID, shardID) + + err = 
cs.GenerateBlocksUntilEpochIsReached(int32(epochForDynamicNFT)) + require.Nil(t, err) + + log.Info("Change to DYNAMIC type") + + // the NFT will not be changed to dynamic type + for i := range tokenIDs { + tx = changeToDynamicTx(nonce, addrs[0].Bytes, tokenIDs[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + for _, tokenID := range tokenIDs { + tx = updateTokenIDTx(nonce, addrs[0].Bytes, tokenID) + + log.Info("updating token id", "tokenID", tokenID) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + for _, tokenID := range tokenIDs { + log.Info("transferring token id", "tokenID", tokenID) + + tx = esdtNFTTransferTx(nonce, addrs[0].Bytes, addrs[1].Bytes, tokenID) + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + checkMetaData(t, cs, core.SystemAccountAddress, sftTokenID, shardID, sftMetaData) + checkMetaDataNotInAcc(t, cs, addrs[0].Bytes, sftTokenID, shardID) + checkMetaDataNotInAcc(t, cs, addrs[1].Bytes, sftTokenID, shardID) + + checkMetaData(t, cs, core.SystemAccountAddress, metaESDTTokenID, shardID, esdtMetaData) + checkMetaDataNotInAcc(t, cs, addrs[0].Bytes, metaESDTTokenID, shardID) + checkMetaDataNotInAcc(t, cs, addrs[1].Bytes, metaESDTTokenID, shardID) + + checkMetaData(t, cs, addrs[1].Bytes, nftTokenID, shardID, nftMetaData) + checkMetaDataNotInAcc(t, cs, addrs[0].Bytes, nftTokenID, shardID) + checkMetaDataNotInAcc(t, cs, core.SystemAccountAddress, nftTokenID, shardID) +} + +func TestChainSimulator_CreateAndPause_NFT(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpoch := uint32(4) + + baseIssuingCost := "1000" + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.DynamicESDTEnableEpoch = activationEpoch + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch) - 1) + require.Nil(t, err) + + // issue NFT + nftTicker := []byte("NFTTICKER") + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + txDataField := bytes.Join( + [][]byte{ + []byte("issueNonFungible"), + []byte(hex.EncodeToString([]byte("asdname"))), + []byte(hex.EncodeToString(nftTicker)), + []byte(hex.EncodeToString([]byte("canPause"))), +
[]byte(hex.EncodeToString([]byte("true"))), + }, + []byte("@"), + ) + + nonce := uint64(0) + tx := &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: core.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + nftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[0].Bytes, nftTokenID, nftMetaData) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + log.Info("check that the metadata for all tokens is saved on the system account") + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) + + log.Info("Pause all tokens") + + scQuery := &process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + CallerAddr: addrs[0].Bytes, + FuncName: "pause", + CallValue: big.NewInt(0), + Arguments: [][]byte{nftTokenID}, + } + result, _, err := cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, "", result.ReturnMessage) + require.Equal(t, testsChainSimulator.OkReturnCode, result.ReturnCode) + + log.Info("wait for DynamicEsdtFlag activation") + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch)) + require.Nil(t, err) + + log.Info("make an updateTokenID@tokenID function call on the ESDTSystem SC for all token types") + + tx = updateTokenIDTx(nonce, addrs[0].Bytes, nftTokenID) + nonce++ + + log.Info("updating token id", "tokenID", nftTokenID) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("check that the metadata for all tokens is saved on the system account") + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) + + log.Info("transfer the tokens to another account") + + log.Info("transfering token id", "tokenID", nftTokenID) + + tx = esdtNFTTransferTx(nonce, addrs[0].Bytes, addrs[1].Bytes, nftTokenID) + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("check that the metaData for the NFT is still on the system account") + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + shardID = cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[2].Bytes) + + 
checkMetaData(t, cs, addrs[1].Bytes, nftTokenID, shardID, nftMetaData) + checkMetaDataNotInAcc(t, cs, addrs[0].Bytes, nftTokenID, shardID) + checkMetaDataNotInAcc(t, cs, core.SystemAccountAddress, nftTokenID, shardID) +} + +func TestChainSimulator_CreateAndPauseTokens_DynamicNFT(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpoch := uint32(4) + + baseIssuingCost := "1000" + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.DynamicESDTEnableEpoch = activationEpoch + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + addrs := createAddresses(t, cs, false) + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch) - 1) + require.Nil(t, err) + + log.Info("Step 2. wait for DynamicEsdtFlag activation") + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch)) + require.Nil(t, err) + + // register dynamic NFT + nftTicker := []byte("NFTTICKER") + nftTokenName := []byte("tokenName") + + txDataField := bytes.Join( + [][]byte{ + []byte("registerDynamic"), + []byte(hex.EncodeToString(nftTokenName)), + []byte(hex.EncodeToString(nftTicker)), + []byte(hex.EncodeToString([]byte("NFT"))), + []byte(hex.EncodeToString([]byte("canPause"))), + []byte(hex.EncodeToString([]byte("true"))), + }, + []byte("@"), + ) + + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + nonce := uint64(0) + tx := &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID: []byte(configs.ChainID), + Version: 1, + } + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + []byte(core.ESDTRoleNFTUpdate), + } + + nftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[0].Bytes, nftTokenID, nftMetaData) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + log.Info("Step 1. 
check that the metadata for all tokens is saved on the system account") + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) + + log.Info("Step 1b. Pause all tokens") + + scQuery := &process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + CallerAddr: addrs[0].Bytes, + FuncName: "pause", + CallValue: big.NewInt(0), + Arguments: [][]byte{nftTokenID}, + } + result, _, err := cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, "", result.ReturnMessage) + require.Equal(t, testsChainSimulator.OkReturnCode, result.ReturnCode) + + tx = updateTokenIDTx(nonce, addrs[0].Bytes, nftTokenID) + nonce++ + + log.Info("updating token id", "tokenID", nftTokenID) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("change to dynamic token") + + tx = changeToDynamicTx(nonce, addrs[0].Bytes, nftTokenID) + nonce++ + + log.Info("changing token to dynamic", "tokenID", nftTokenID) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("check that the metadata for all tokens is saved on the system account") + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) + + log.Info("transferring token id", "tokenID", nftTokenID) + + tx = esdtNFTTransferTx(nonce, addrs[0].Bytes, addrs[1].Bytes, nftTokenID) + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + log.Info("check that the metaData for the NFT is still on the system account") + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + shardID = cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[2].Bytes) + + checkMetaData(t, cs, core.SystemAccountAddress, nftTokenID, shardID, nftMetaData) + checkMetaDataNotInAcc(t, cs, addrs[0].Bytes, nftTokenID, shardID) + checkMetaDataNotInAcc(t, cs, addrs[1].Bytes, nftTokenID, shardID) +} + +func TestChainSimulator_CheckRolesWhichHasToBeSingular(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + baseIssuingCost := "1000" + + cs, _ := getTestChainSimulatorWithDynamicNFTEnabled(t, baseIssuingCost) + defer cs.Close() + + addrs := createAddresses(t, cs, true) + + // register dynamic NFT + nftTicker := []byte("NFTTICKER") + nftTokenName := []byte("tokenName") + + txDataField := bytes.Join( + [][]byte{ + []byte("registerDynamic"), + []byte(hex.EncodeToString(nftTokenName)), + []byte(hex.EncodeToString(nftTicker)), + []byte(hex.EncodeToString([]byte("NFT"))), + []byte(hex.EncodeToString([]byte("canPause"))), + []byte(hex.EncodeToString([]byte("true"))), + }, + []byte("@"), + ) + + callValue, _ := big.NewInt(0).SetString(baseIssuingCost, 10) + + nonce := uint64(0) + tx := &transaction.Transaction{ + Nonce: nonce, + SndAddr: addrs[0].Bytes, + RcvAddr: vm.ESDTSCAddress, + GasLimit: 100_000_000, + GasPrice: minGasPrice, + Signature: []byte("dummySig"), + Data: txDataField, + Value: callValue, + ChainID:
[]byte(configs.ChainID), + Version: 1, + } + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleNFTUpdateAttributes), + []byte(core.ESDTRoleNFTAddURI), + []byte(core.ESDTRoleSetNewURI), + []byte(core.ESDTRoleModifyCreator), + []byte(core.ESDTRoleModifyRoyalties), + []byte(core.ESDTRoleNFTRecreate), + []byte(core.ESDTRoleNFTUpdate), + } + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + for _, role := range roles { + tx = setSpecialRoleTx(nonce, addrs[0].Bytes, addrs[1].Bytes, nftTokenID, [][]byte{role}) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + if txResult.Logs != nil && len(txResult.Logs.Events) > 0 { + returnMessage := string(txResult.Logs.Events[0].Topics[1]) + require.True(t, strings.Contains(returnMessage, "already exists")) + } else { + require.Fail(t, "should have been return error message") + } + } +} diff --git a/integrationTests/chainSimulator/vm/esdtTokens_test.go b/integrationTests/chainSimulator/vm/esdtTokens_test.go new file mode 100644 index 00000000000..d12bfcbb550 --- /dev/null +++ b/integrationTests/chainSimulator/vm/esdtTokens_test.go @@ -0,0 +1,401 @@ +package vm + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + "net/http" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/api/groups" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/stretchr/testify/require" +) + +type esdtTokensCompleteResponseData struct { + Tokens map[string]groups.ESDTNFTTokenData `json:"esdts"` +} + +type esdtTokensCompleteResponse struct { + Data esdtTokensCompleteResponseData `json:"data"` + Error string `json:"error"` + Code string +} + +func TestChainSimulator_Api_TokenType(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpoch := uint32(2) + + baseIssuingCost := "1000" + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewFreePortAPIConfigurator("localhost"), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.DynamicESDTEnableEpoch = activationEpoch + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) 
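+	// Note: unlike the other chain simulator tests in this change, which use api.NewNoApiInterface(),
+	// this test is configured above with api.NewFreePortAPIConfigurator("localhost"), so each simulated
+	// node exposes a real REST API. The test relies on this later when it performs an HTTP GET against
+	// /address/<bech32 address>/esdt and decodes the JSON reply (tokens under the "esdts" field) into
+	// the esdtTokensCompleteResponse type declared at the top of this file.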
+ require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch)) + require.Nil(t, err) + + log.Info("Initial setup: Create tokens") + + addrs := createAddresses(t, cs, false) + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + + // issue fungible + fungibleTicker := []byte("FUNTICKER") + nonce := uint64(0) + tx := issueTx(nonce, addrs[0].Bytes, fungibleTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + fungibleTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], fungibleTokenID, roles) + nonce++ + + log.Info("Issued fungible token id", "tokenID", string(fungibleTokenID)) + + // issue NFT + nftTicker := []byte("NFTTICKER") + tx = issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + // issue SFT + sftTicker := []byte("SFTTICKER") + tx = issueSemiFungibleTx(nonce, addrs[0].Bytes, sftTicker, baseIssuingCost) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + sftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], sftTokenID, roles) + nonce++ + + log.Info("Issued SFT token id", "tokenID", string(sftTokenID)) + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + sftMetaData := txsFee.GetDefaultMetaData() + sftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + fungibleMetaData := txsFee.GetDefaultMetaData() + fungibleMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tokenIDs := [][]byte{ + nftTokenID, + sftTokenID, + } + + tokensMetadata := []*txsFee.MetaData{ + nftMetaData, + sftMetaData, + } + + for i := range tokenIDs { + tx = nftCreateTx(nonce, addrs[0].Bytes, tokenIDs[i], tokensMetadata[i]) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + nonce++ + } + + err = cs.GenerateBlocks(10) + require.Nil(t, err) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + restAPIInterfaces := cs.GetRestAPIInterfaces() + require.NotNil(t, restAPIInterfaces) + + url := fmt.Sprintf("http://%s/address/%s/esdt", restAPIInterfaces[shardID], addrs[0].Bech32) + response := &esdtTokensCompleteResponse{} + + doHTTPClientGetReq(t, url, response) + + allTokens := response.Data.Tokens + + require.Equal(t, 3, len(allTokens)) + + expTokenID := string(fungibleTokenID) + tokenData, ok := allTokens[expTokenID] + require.True(t, ok) + require.Equal(t, expTokenID, tokenData.TokenIdentifier) + require.Equal(t, 
core.FungibleESDT, tokenData.Type) + + expTokenID = string(nftTokenID) + "-01" + tokenData, ok = allTokens[expTokenID] + require.True(t, ok) + require.Equal(t, expTokenID, tokenData.TokenIdentifier) + require.Equal(t, core.NonFungibleESDTv2, tokenData.Type) + + expTokenID = string(sftTokenID) + "-01" + tokenData, ok = allTokens[expTokenID] + require.True(t, ok) + require.Equal(t, expTokenID, tokenData.TokenIdentifier) + require.Equal(t, core.SemiFungibleESDT, tokenData.Type) +} + +func TestChainSimulator_Api_NFTToken(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + activationEpoch := uint32(2) + + baseIssuingCost := "1000" + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewFreePortAPIConfigurator("localhost"), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.DynamicESDTEnableEpoch = activationEpoch + cfg.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost = baseIssuingCost + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch) - 1) + require.Nil(t, err) + + log.Info("Initial setup: Create NFT token before activation") + + addrs := createAddresses(t, cs, false) + + roles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleTransfer), + } + + // issue NFT + nftTicker := []byte("NFTTICKER") + nonce := uint64(0) + tx := issueNonFungibleTx(nonce, addrs[0].Bytes, nftTicker, baseIssuingCost) + nonce++ + + txResult, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + nftTokenID := txResult.Logs.Events[0].Topics[0] + setAddressEsdtRoles(t, cs, nonce, addrs[0], nftTokenID, roles) + nonce++ + + log.Info("Issued NFT token id", "tokenID", string(nftTokenID)) + + nftMetaData := txsFee.GetDefaultMetaData() + nftMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + + tx = nftCreateTx(nonce, addrs[0].Bytes, nftTokenID, nftMetaData) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + err = cs.GenerateBlocks(5) + require.Nil(t, err) + + shardID := cs.GetNodeHandler(0).GetProcessComponents().ShardCoordinator().ComputeId(addrs[0].Bytes) + + restAPIInterfaces := cs.GetRestAPIInterfaces() + require.NotNil(t, restAPIInterfaces) + + url := fmt.Sprintf("http://%s/address/%s/esdt", restAPIInterfaces[shardID], addrs[0].Bech32) + response := &esdtTokensCompleteResponse{} + + doHTTPClientGetReq(t, url, response) + + allTokens := response.Data.Tokens + + require.Equal(t, 1, len(allTokens)) + + expTokenID := string(nftTokenID) + "-01" + tokenData, ok := allTokens[expTokenID] + require.True(t, ok) + require.Equal(t, expTokenID, 
tokenData.TokenIdentifier) + require.Equal(t, "", tokenData.Type) + + log.Info("Wait for DynamicESDTFlag activation") + + err = cs.GenerateBlocksUntilEpochIsReached(int32(activationEpoch)) + require.Nil(t, err) + + doHTTPClientGetReq(t, url, response) + + allTokens = response.Data.Tokens + + require.Equal(t, 1, len(allTokens)) + + expTokenID = string(nftTokenID) + "-01" + tokenData, ok = allTokens[expTokenID] + require.True(t, ok) + require.Equal(t, expTokenID, tokenData.TokenIdentifier) + require.Equal(t, "", tokenData.Type) + + log.Info("Update token id", "tokenID", nftTokenID) + + tx = updateTokenIDTx(nonce, addrs[0].Bytes, nftTokenID) + nonce++ + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + doHTTPClientGetReq(t, url, response) + + allTokens = response.Data.Tokens + + require.Equal(t, 1, len(allTokens)) + + expTokenID = string(nftTokenID) + "-01" + tokenData, ok = allTokens[expTokenID] + require.True(t, ok) + require.Equal(t, expTokenID, tokenData.TokenIdentifier) + require.Equal(t, "", tokenData.Type) + + log.Info("Transfer token id", "tokenID", nftTokenID) + + tx = esdtNFTTransferTx(nonce, addrs[0].Bytes, addrs[1].Bytes, nftTokenID) + nonce++ + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + require.Equal(t, "success", txResult.Status.String()) + + url = fmt.Sprintf("http://%s/address/%s/esdt", restAPIInterfaces[1], addrs[1].Bech32) + doHTTPClientGetReq(t, url, response) + + allTokens = response.Data.Tokens + + require.Equal(t, 1, len(allTokens)) + + expTokenID = string(nftTokenID) + "-01" + tokenData, ok = allTokens[expTokenID] + require.True(t, ok) + require.Equal(t, expTokenID, tokenData.TokenIdentifier) + require.Equal(t, core.NonFungibleESDTv2, tokenData.Type) + + log.Info("Change to DYNAMIC type") + + tx = changeToDynamicTx(nonce, addrs[0].Bytes, nftTokenID) + + txResult, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txResult) + + require.Equal(t, "success", txResult.Status.String()) + + response = &esdtTokensCompleteResponse{} + doHTTPClientGetReq(t, url, response) + + allTokens = response.Data.Tokens + + require.Equal(t, 1, len(allTokens)) + + expTokenID = string(nftTokenID) + "-01" + tokenData, ok = allTokens[expTokenID] + require.True(t, ok) + require.Equal(t, expTokenID, tokenData.TokenIdentifier) + require.Equal(t, core.NonFungibleESDTv2, tokenData.Type) +} + +func doHTTPClientGetReq(t *testing.T, url string, response interface{}) { + httpClient := &http.Client{} + + req, err := http.NewRequest(http.MethodGet, url, nil) + require.Nil(t, err) + + resp, err := httpClient.Do(req) + require.Nil(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + + jsonParser := json.NewDecoder(resp.Body) + err = jsonParser.Decode(&response) + require.Nil(t, err) +} diff --git a/integrationTests/common.go b/integrationTests/common.go new file mode 100644 index 00000000000..e4365471cd7 --- /dev/null +++ b/integrationTests/common.go @@ -0,0 +1,38 @@ +package integrationTests + +import ( + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + vmcommon 
"github.com/multiversx/mx-chain-vm-common-go" +) + +// ProcessSCOutputAccounts will save account changes in accounts db from vmOutput +func ProcessSCOutputAccounts(vmOutput *vmcommon.VMOutput, accountsDB state.AccountsAdapter) error { + outputAccounts := process.SortVMOutputInsideData(vmOutput) + for _, outAcc := range outputAccounts { + acc := stakingcommon.LoadUserAccount(accountsDB, outAcc.Address) + + storageUpdates := process.GetSortedStorageUpdates(outAcc) + for _, storeUpdate := range storageUpdates { + err := acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) + if err != nil { + return err + } + + if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { + err = acc.AddToBalance(outAcc.BalanceDelta) + if err != nil { + return err + } + } + + err = accountsDB.SaveAccount(acc) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/integrationTests/factory/consensusComponents/consensusComponents_test.go b/integrationTests/factory/consensusComponents/consensusComponents_test.go index 5be694c740d..f560f099705 100644 --- a/integrationTests/factory/consensusComponents/consensusComponents_test.go +++ b/integrationTests/factory/consensusComponents/consensusComponents_test.go @@ -67,6 +67,7 @@ func TestConsensusComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/factory/dataComponents/dataComponents_test.go b/integrationTests/factory/dataComponents/dataComponents_test.go index 9ebc4a49fc5..c28a41c6543 100644 --- a/integrationTests/factory/dataComponents/dataComponents_test.go +++ b/integrationTests/factory/dataComponents/dataComponents_test.go @@ -13,6 +13,10 @@ import ( ) func TestDataComponents_Create_Close_ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + time.Sleep(time.Second * 4) gc := goroutines.NewGoCounter(goroutines.TestsRelevantGoRoutines) diff --git a/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go b/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go index 0bd34fd45e4..9082ce63c06 100644 --- a/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go +++ b/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go @@ -67,6 +67,7 @@ func TestHeartbeatComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/factory/processComponents/processComponents_test.go b/integrationTests/factory/processComponents/processComponents_test.go index d81d921e74c..2f2c859bc94 100644 --- a/integrationTests/factory/processComponents/processComponents_test.go +++ b/integrationTests/factory/processComponents/processComponents_test.go @@ -68,6 +68,7 @@ func TestProcessComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + 
managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/factory/statusComponents/statusComponents_test.go b/integrationTests/factory/statusComponents/statusComponents_test.go index 9865ce593ce..62e2ad1e289 100644 --- a/integrationTests/factory/statusComponents/statusComponents_test.go +++ b/integrationTests/factory/statusComponents/statusComponents_test.go @@ -68,6 +68,7 @@ func TestStatusComponents_Create_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/frontend/wallet/txInterception_test.go b/integrationTests/frontend/wallet/txInterception_test.go index 1cb60ea8a46..1eeacc61f94 100644 --- a/integrationTests/frontend/wallet/txInterception_test.go +++ b/integrationTests/frontend/wallet/txInterception_test.go @@ -15,6 +15,10 @@ import ( const mintingValue = "100000000" func TestInterceptedTxWithoutDataField(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + value := big.NewInt(0) value.SetString("999", 10) @@ -35,6 +39,10 @@ func TestInterceptedTxWithoutDataField(t *testing.T) { } func TestInterceptedTxWithDataField(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + value := big.NewInt(0) value.SetString("999", 10) @@ -55,6 +63,10 @@ func TestInterceptedTxWithDataField(t *testing.T) { } func TestInterceptedTxWithSigningOverTxHash(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + value := big.NewInt(0) value.SetString("1000000000000000000", 10) diff --git a/integrationTests/interface.go b/integrationTests/interface.go index abe0b1a7be8..e4be7fe388c 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -96,6 +96,7 @@ type Facade interface { EncodeAddressPubkey(pk []byte) (string, error) GetThrottlerForEndpoint(endpoint string) (core.Throttler, bool) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) ExecuteSCQuery(*process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) DecodeAddressPubkey(pk string) ([]byte, error) GetProof(rootHash string, address string) (*common.GetProofResponse, error) @@ -113,6 +114,7 @@ type Facade interface { IsDataTrieMigrated(address string, options api.AccountQueryOptions) (bool, error) GetManagedKeysCount() int GetManagedKeys() []string + GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) diff --git a/integrationTests/miniNetwork.go b/integrationTests/miniNetwork.go new file mode 100644 index 00000000000..e9c64f5606d --- /dev/null +++ b/integrationTests/miniNetwork.go @@ -0,0 +1,113 @@ +package integrationTests + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/data/transaction" +) + +// MiniNetwork is a mini network, useful for some integration tests +type MiniNetwork struct { + Round uint64 + Nonce uint64 + + Nodes []*TestProcessorNode + ShardNode *TestProcessorNode + MetachainNode *TestProcessorNode + Users 
map[string]*TestWalletAccount +} + +// NewMiniNetwork creates a MiniNetwork +func NewMiniNetwork() *MiniNetwork { + n := &MiniNetwork{} + + nodes := CreateNodes( + 1, + 1, + 1, + ) + + n.Nodes = nodes + n.ShardNode = nodes[0] + n.MetachainNode = nodes[1] + n.Users = make(map[string]*TestWalletAccount) + + return n +} + +// Stop stops the mini network +func (n *MiniNetwork) Stop() { + n.ShardNode.Close() + n.MetachainNode.Close() +} + +// FundAccount funds an account +func (n *MiniNetwork) FundAccount(address []byte, value *big.Int) { + shard := n.MetachainNode.ShardCoordinator.ComputeId(address) + + if shard == n.MetachainNode.ShardCoordinator.SelfId() { + MintAddress(n.MetachainNode.AccntState, address, value) + } else { + MintAddress(n.ShardNode.AccntState, address, value) + } +} + +// AddUser adds a user (account) to the mini network +func (n *MiniNetwork) AddUser(balance *big.Int) *TestWalletAccount { + user := CreateTestWalletAccount(n.ShardNode.ShardCoordinator, 0) + n.Users[string(user.Address)] = user + n.FundAccount(user.Address, balance) + return user +} + +// Start starts the mini network +func (n *MiniNetwork) Start() { + n.Round = 1 + n.Nonce = 1 +} + +// Continue advances processing with a number of rounds +func (n *MiniNetwork) Continue(t *testing.T, numRounds int) { + idxProposers := []int{0, 1} + + for i := int64(0); i < int64(numRounds); i++ { + n.Nonce, n.Round = ProposeAndSyncOneBlock(t, n.Nodes, idxProposers, n.Round, n.Nonce) + } +} + +// SendTransaction sends a transaction +func (n *MiniNetwork) SendTransaction( + senderPubkey []byte, + receiverPubkey []byte, + value *big.Int, + data string, + additionalGasLimit uint64, +) (string, error) { + sender, ok := n.Users[string(senderPubkey)] + if !ok { + return "", fmt.Errorf("unknown sender: %s", hex.EncodeToString(senderPubkey)) + } + + tx := &transaction.Transaction{ + Nonce: sender.Nonce, + Value: new(big.Int).Set(value), + SndAddr: sender.Address, + RcvAddr: receiverPubkey, + Data: []byte(data), + GasPrice: MinTxGasPrice, + GasLimit: MinTxGasLimit + uint64(len(data)) + additionalGasLimit, + ChainID: ChainID, + Version: MinTransactionVersion, + } + + txBuff, _ := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer, TestTxSignHasher) + tx.Signature, _ = sender.SingleSigner.Sign(sender.SkTxSign, txBuff) + txHash, err := n.ShardNode.SendTransaction(tx) + + sender.Nonce++ + + return txHash, err +} diff --git a/integrationTests/mock/builtInCostHandlerStub.go b/integrationTests/mock/builtInCostHandlerStub.go deleted file mode 100644 index 4ee3b23b062..00000000000 --- a/integrationTests/mock/builtInCostHandlerStub.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { -} - -// ComputeBuiltInCost - -func (b *BuiltInCostHandlerStub) ComputeBuiltInCost(_ data.TransactionWithFeeHandler) uint64 { - return 1 -} - -// IsBuiltInFuncCall - -func (b *BuiltInCostHandlerStub) IsBuiltInFuncCall(_ data.TransactionWithFeeHandler) bool { - return false -} - -// IsInterfaceNil - -func (b *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return b == nil -} diff --git a/integrationTests/mock/epochRewardsCreatorStub.go b/integrationTests/mock/epochRewardsCreatorStub.go deleted file mode 100644 index 22c425f3e41..00000000000 --- a/integrationTests/mock/epochRewardsCreatorStub.go +++ /dev/null @@ -1,109 +0,0 @@ -package mock - -import ( - "math/big" - - 
"github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochRewardsCreatorStub - -type EpochRewardsCreatorStub struct { - CreateRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) (block.MiniBlockSlice, error) - VerifyRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) error - CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte - SaveBlockDataToStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - DeleteBlockDataFromStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - RemoveBlockDataFromPoolsCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - GetRewardsTxsCalled func(body *block.Body) map[string]data.TransactionHandler - GetProtocolSustainCalled func() *big.Int - GetLocalTxCacheCalled func() epochStart.TransactionCacher -} - -// GetProtocolSustainabilityRewards - -func (e *EpochRewardsCreatorStub) GetProtocolSustainabilityRewards() *big.Int { - if e.GetProtocolSustainCalled != nil { - return e.GetProtocolSustainCalled() - } - return big.NewInt(0) -} - -// GetLocalTxCache - -func (e *EpochRewardsCreatorStub) GetLocalTxCache() epochStart.TransactionCacher { - if e.GetLocalTxCacheCalled != nil { - return e.GetLocalTxCacheCalled() - } - return &TxForCurrentBlockStub{} -} - -// CreateRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) (block.MiniBlockSlice, error) { - if e.CreateRewardsMiniBlocksCalled != nil { - return e.CreateRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil, nil -} - -// GetRewardsTxs - -func (e *EpochRewardsCreatorStub) GetRewardsTxs(body *block.Body) map[string]data.TransactionHandler { - if e.GetRewardsTxsCalled != nil { - return e.GetRewardsTxsCalled(body) - } - return nil -} - -// VerifyRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) error { - if e.VerifyRewardsMiniBlocksCalled != nil { - return e.VerifyRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil -} - -// CreateMarshalledData - -func (e *EpochRewardsCreatorStub) CreateMarshalledData(body *block.Body) map[string][][]byte { - if e.CreateMarshalledDataCalled != nil { - return e.CreateMarshalledDataCalled(body) - } - return nil -} - -// SaveBlockDataToStorage - -func (e *EpochRewardsCreatorStub) SaveBlockDataToStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.SaveBlockDataToStorageCalled != nil { - e.SaveBlockDataToStorageCalled(metaBlock, body) - } -} - -// DeleteBlockDataFromStorage - -func (e *EpochRewardsCreatorStub) DeleteBlockDataFromStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.DeleteBlockDataFromStorageCalled != nil { - e.DeleteBlockDataFromStorageCalled(metaBlock, body) - } -} - -// IsInterfaceNil - -func (e *EpochRewardsCreatorStub) IsInterfaceNil() bool { - return e == nil -} - -// 
RemoveBlockDataFromPools - -func (e *EpochRewardsCreatorStub) RemoveBlockDataFromPools(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.RemoveBlockDataFromPoolsCalled != nil { - e.RemoveBlockDataFromPoolsCalled(metaBlock, body) - } -} diff --git a/integrationTests/mock/epochValidatorInfoCreatorStub.go b/integrationTests/mock/epochValidatorInfoCreatorStub.go deleted file mode 100644 index 445d305596e..00000000000 --- a/integrationTests/mock/epochValidatorInfoCreatorStub.go +++ /dev/null @@ -1,86 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochValidatorInfoCreatorStub - -type EpochValidatorInfoCreatorStub struct { - CreateValidatorInfoMiniBlocksCalled func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) - VerifyValidatorInfoMiniBlocksCalled func(miniblocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error - GetLocalValidatorInfoCacheCalled func() epochStart.ValidatorInfoCacher - CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte - GetValidatorInfoTxsCalled func(body *block.Body) map[string]*state.ShardValidatorInfo - SaveBlockDataToStorageCalled func(metaBlock data.HeaderHandler, body *block.Body) - DeleteBlockDataFromStorageCalled func(metaBlock data.HeaderHandler, body *block.Body) - RemoveBlockDataFromPoolsCalled func(metaBlock data.HeaderHandler, body *block.Body) -} - -// CreateValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { - if e.CreateValidatorInfoMiniBlocksCalled != nil { - return e.CreateValidatorInfoMiniBlocksCalled(validatorInfo) - } - return make(block.MiniBlockSlice, 0), nil -} - -// VerifyValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error { - if e.VerifyValidatorInfoMiniBlocksCalled != nil { - return e.VerifyValidatorInfoMiniBlocksCalled(miniBlocks, validatorsInfo) - } - return nil -} - -// GetLocalValidatorInfoCache - -func (e *EpochValidatorInfoCreatorStub) GetLocalValidatorInfoCache() epochStart.ValidatorInfoCacher { - if e.GetLocalValidatorInfoCacheCalled != nil { - return e.GetLocalValidatorInfoCacheCalled() - } - return nil -} - -// CreateMarshalledData - -func (e *EpochValidatorInfoCreatorStub) CreateMarshalledData(body *block.Body) map[string][][]byte { - if e.CreateMarshalledDataCalled != nil { - return e.CreateMarshalledDataCalled(body) - } - return nil -} - -// GetValidatorInfoTxs - -func (e *EpochValidatorInfoCreatorStub) GetValidatorInfoTxs(body *block.Body) map[string]*state.ShardValidatorInfo { - if e.GetValidatorInfoTxsCalled != nil { - return e.GetValidatorInfoTxsCalled(body) - } - return nil -} - -// SaveBlockDataToStorage - -func (e *EpochValidatorInfoCreatorStub) SaveBlockDataToStorage(metaBlock data.HeaderHandler, body *block.Body) { - if e.SaveBlockDataToStorageCalled != nil { - e.SaveBlockDataToStorageCalled(metaBlock, body) - } -} - -// DeleteBlockDataFromStorage - -func (e *EpochValidatorInfoCreatorStub) DeleteBlockDataFromStorage(metaBlock data.HeaderHandler, body *block.Body) { - if e.DeleteBlockDataFromStorageCalled != nil { - 
e.DeleteBlockDataFromStorageCalled(metaBlock, body) - } -} - -// IsInterfaceNil - -func (e *EpochValidatorInfoCreatorStub) IsInterfaceNil() bool { - return e == nil -} - -// RemoveBlockDataFromPools - -func (e *EpochValidatorInfoCreatorStub) RemoveBlockDataFromPools(metaBlock data.HeaderHandler, body *block.Body) { - if e.RemoveBlockDataFromPoolsCalled != nil { - e.RemoveBlockDataFromPoolsCalled(metaBlock, body) - } -} diff --git a/integrationTests/mock/intermediateTransactionHandlerMock.go b/integrationTests/mock/intermediateTransactionHandlerMock.go index 77e60169ee7..f86d69ff63e 100644 --- a/integrationTests/mock/intermediateTransactionHandlerMock.go +++ b/integrationTests/mock/intermediateTransactionHandlerMock.go @@ -7,7 +7,7 @@ import ( // IntermediateTransactionHandlerMock - type IntermediateTransactionHandlerMock struct { - AddIntermediateTransactionsCalled func(txs []data.TransactionHandler) error + AddIntermediateTransactionsCalled func(txs []data.TransactionHandler, key []byte) error GetNumOfCrossInterMbsAndTxsCalled func() (int, int) CreateAllInterMiniBlocksCalled func() []*block.MiniBlock VerifyInterMiniBlocksCalled func(body *block.Body) error @@ -16,7 +16,7 @@ type IntermediateTransactionHandlerMock struct { CreateMarshalledDataCalled func(txHashes [][]byte) ([][]byte, error) GetAllCurrentFinishedTxsCalled func() map[string]data.TransactionHandler RemoveProcessedResultsCalled func(key []byte) [][]byte - InitProcessedResultsCalled func(key []byte) + InitProcessedResultsCalled func(key []byte, parentKey []byte) intermediateTransactions []data.TransactionHandler } @@ -29,9 +29,9 @@ func (ith *IntermediateTransactionHandlerMock) RemoveProcessedResults(key []byte } // InitProcessedResults - -func (ith *IntermediateTransactionHandlerMock) InitProcessedResults(key []byte) { +func (ith *IntermediateTransactionHandlerMock) InitProcessedResults(key []byte, parentKey []byte) { if ith.InitProcessedResultsCalled != nil { - ith.InitProcessedResultsCalled(key) + ith.InitProcessedResultsCalled(key, parentKey) } } @@ -57,12 +57,12 @@ func (ith *IntermediateTransactionHandlerMock) CreateMarshalledData(txHashes [][ } // AddIntermediateTransactions - -func (ith *IntermediateTransactionHandlerMock) AddIntermediateTransactions(txs []data.TransactionHandler) error { +func (ith *IntermediateTransactionHandlerMock) AddIntermediateTransactions(txs []data.TransactionHandler, key []byte) error { if ith.AddIntermediateTransactionsCalled == nil { ith.intermediateTransactions = append(ith.intermediateTransactions, txs...) 
return nil } - return ith.AddIntermediateTransactionsCalled(txs) + return ith.AddIntermediateTransactionsCalled(txs, key) } // GetIntermediateTransactions - diff --git a/integrationTests/mock/processComponentsStub.go b/integrationTests/mock/processComponentsStub.go index e5a94dd78c1..04dad00a52c 100644 --- a/integrationTests/mock/processComponentsStub.go +++ b/integrationTests/mock/processComponentsStub.go @@ -59,6 +59,9 @@ type ProcessComponentsStub struct { ProcessedMiniBlocksTrackerInternal process.ProcessedMiniBlocksTracker ReceiptsRepositoryInternal factory.ReceiptsRepository ESDTDataStorageHandlerForAPIInternal vmcommon.ESDTNFTStorageHandler + SentSignaturesTrackerInternal process.SentSignaturesTracker + EpochSystemSCProcessorInternal process.EpochStartSystemSCProcessor + RelayedTxV3ProcessorField process.RelayedTxV3Processor } // Create - @@ -290,6 +293,21 @@ func (pcs *ProcessComponentsStub) ESDTDataStorageHandlerForAPI() vmcommon.ESDTNF return pcs.ESDTDataStorageHandlerForAPIInternal } +// SentSignaturesTracker - +func (pcs *ProcessComponentsStub) SentSignaturesTracker() process.SentSignaturesTracker { + return pcs.SentSignaturesTrackerInternal +} + +// EpochSystemSCProcessor - +func (pcs *ProcessComponentsStub) EpochSystemSCProcessor() process.EpochStartSystemSCProcessor { + return pcs.EpochSystemSCProcessorInternal +} + +// RelayedTxV3Processor - +func (pcs *ProcessComponentsStub) RelayedTxV3Processor() process.RelayedTxV3Processor { + return pcs.RelayedTxV3ProcessorField +} + // IsInterfaceNil - func (pcs *ProcessComponentsStub) IsInterfaceNil() bool { return pcs == nil diff --git a/integrationTests/mock/transactionCoordinatorMock.go b/integrationTests/mock/transactionCoordinatorMock.go index d3671b1d77b..c002c52cc0f 100644 --- a/integrationTests/mock/transactionCoordinatorMock.go +++ b/integrationTests/mock/transactionCoordinatorMock.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/processedMb" ) @@ -29,7 +30,7 @@ type TransactionCoordinatorMock struct { VerifyCreatedBlockTransactionsCalled func(hdr data.HeaderHandler, body *block.Body) error CreatePostProcessMiniBlocksCalled func() block.MiniBlockSlice VerifyCreatedMiniBlocksCalled func(hdr data.HeaderHandler, body *block.Body) error - AddIntermediateTransactionsCalled func(mapSCRs map[block.Type][]data.TransactionHandler) error + AddIntermediateTransactionsCalled func(mapSCRs map[block.Type][]data.TransactionHandler, key []byte) error GetAllIntermediateTxsCalled func() map[block.Type]map[string]data.TransactionHandler AddTxsFromMiniBlocksCalled func(miniBlocks block.MiniBlockSlice) AddTransactionsCalled func(txHandlers []data.TransactionHandler, blockType block.Type) @@ -213,12 +214,12 @@ func (tcm *TransactionCoordinatorMock) VerifyCreatedMiniBlocks(hdr data.HeaderHa } // AddIntermediateTransactions - -func (tcm *TransactionCoordinatorMock) AddIntermediateTransactions(mapSCRs map[block.Type][]data.TransactionHandler) error { +func (tcm *TransactionCoordinatorMock) AddIntermediateTransactions(mapSCRs map[block.Type][]data.TransactionHandler, key []byte) error { if tcm.AddIntermediateTransactionsCalled == nil { return nil } - return tcm.AddIntermediateTransactionsCalled(mapSCRs) + return tcm.AddIntermediateTransactionsCalled(mapSCRs, key) } // GetAllIntermediateTxs - diff --git 
a/integrationTests/mock/validatorStatisticsProcessorStub.go b/integrationTests/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 34a0e35cad1..00000000000 --- a/integrationTests/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,130 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// 
RevertPeerState - -func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - return nil -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false -} diff --git a/integrationTests/mock/validatorsProviderStub.go b/integrationTests/mock/validatorsProviderStub.go deleted file mode 100644 index 98ea652340b..00000000000 --- a/integrationTests/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/validator" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics -} - -// GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go index cf104b736db..eec61878296 100644 --- a/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "fmt" "math/big" - "sync" "testing" "time" @@ -14,13 +13,14 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-crypto-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { @@ -61,15 +61,15 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { proposerNode := nodes[0] - //sender shard keys, receivers keys + // sender shard keys, 
receivers keys sendersPrivateKeys := make([]crypto.PrivateKey, 3) receiversPublicKeys := make(map[uint32][]crypto.PublicKey) for i := 0; i < txToGenerateInEachMiniBlock; i++ { sendersPrivateKeys[i], _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) - //receivers in same shard with the sender + // receivers in same shard with the sender _, pk, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) receiversPublicKeys[senderShard] = append(receiversPublicKeys[senderShard], pk) - //receivers in other shards + // receivers in other shards for _, shardId := range recvShards { _, pk, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, shardId) receiversPublicKeys[shardId] = append(receiversPublicKeys[shardId], pk) @@ -111,13 +111,13 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { continue } - //test sender balances + // test sender balances for _, sk := range sendersPrivateKeys { valTransferred := big.NewInt(0).Mul(totalValuePerTx, big.NewInt(int64(len(receiversPublicKeys)))) valRemaining := big.NewInt(0).Sub(valMinting, valTransferred) integrationTests.TestPrivateKeyHasBalance(t, n, sk, valRemaining) } - //test receiver balances from same shard + // test receiver balances from same shard for _, pk := range receiversPublicKeys[proposerNode.ShardCoordinator.SelfId()] { integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } @@ -136,7 +136,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { continue } - //test receiver balances from same shard + // test receiver balances from same shard for _, pk := range receiversPublicKeys[n.ShardCoordinator.SelfId()] { integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } @@ -352,87 +352,6 @@ func TestSimpleTransactionsWithMoreValueThanBalanceYieldReceiptsInMultiShardedEn } } -func TestExecuteBlocksWithGapsBetweenBlocks(t *testing.T) { - //TODO fix this test - t.Skip("TODO fix this test") - if testing.Short() { - t.Skip("this is not a short test") - } - nodesPerShard := 2 - shardConsensusGroupSize := 2 - nbMetaNodes := 400 - nbShards := 1 - consensusGroupSize := 400 - - cacheMut := &sync.Mutex{} - - putCounter := 0 - cacheMap := make(map[string]interface{}) - - // create map of shard - testNodeProcessors for metachain and shard chain - nodesMap := integrationTests.CreateNodesWithNodesCoordinatorWithCacher( - nodesPerShard, - nbMetaNodes, - nbShards, - shardConsensusGroupSize, - consensusGroupSize, - ) - - roundsPerEpoch := uint64(1000) - maxGasLimitPerBlock := uint64(100000) - gasPrice := uint64(10) - gasLimit := uint64(100) - for _, nodes := range nodesMap { - integrationTests.SetEconomicsParameters(nodes, maxGasLimitPerBlock, gasPrice, gasLimit) - integrationTests.DisplayAndStartNodes(nodes[0:1]) - - for _, node := range nodes { - node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) - } - } - - defer func() { - for _, nodes := range nodesMap { - for _, n := range nodes { - n.Close() - } - } - }() - - round := uint64(1) - roundDifference := 10 - nonce := uint64(1) - - firstNodeOnMeta := nodesMap[core.MetachainShardId][0] - body, header, _ := firstNodeOnMeta.ProposeBlock(round, nonce) - - // set bitmap for all consensus nodes signing - bitmap := make([]byte, consensusGroupSize/8+1) - for i := range bitmap { - bitmap[i] = 0xFF - } - - bitmap[consensusGroupSize/8] >>= uint8(8 - (consensusGroupSize % 8)) - err := header.SetPubKeysBitmap(bitmap) - assert.Nil(t, err) - - firstNodeOnMeta.CommitBlock(body, header) - 
- round += uint64(roundDifference) - nonce++ - putCounter = 0 - - cacheMut.Lock() - for k := range cacheMap { - delete(cacheMap, k) - } - cacheMut.Unlock() - - firstNodeOnMeta.ProposeBlock(round, nonce) - - assert.Equal(t, roundDifference, putCounter) -} - // TestShouldSubtractTheCorrectTxFee uses the mock VM as it's gas model is predictable // The test checks the tx fee subtraction from the sender account when deploying a SC // It also checks the fee obtained by the leader is correct diff --git a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go index d89abd3aae5..dd964aeb745 100644 --- a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go +++ b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go @@ -23,6 +23,9 @@ func TestEpochStartChangeWithContinuousTransactionsInMultiShardedEnvironment(t * StakingV2EnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go index b7b658e4ca2..d14eb086de6 100644 --- a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go +++ b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go @@ -22,6 +22,9 @@ func TestEpochStartChangeWithoutTransactionInMultiShardedEnvironment(t *testing. 
StakingV2EnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 8ce1b1a72ec..ce933a22666 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -33,6 +33,8 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/scheduledDataSyncer" @@ -67,6 +69,9 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( @@ -149,7 +154,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui pksBytes := integrationTests.CreatePkBytes(uint32(numOfShards)) address := []byte("afafafafafafafafafafafafafafafaf") - nodesConfig := &mock.NodesSetupStub{ + nodesConfig := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) for i := uint32(0); i < uint32(numOfShards); i++ { @@ -181,7 +186,6 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui return integrationTests.MinTransactionVersion }, } - defer func() { errRemoveDir := os.RemoveAll("Epoch_0") assert.NoError(t, errRemoveDir) @@ -217,7 +221,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui cryptoComponents.BlKeyGen = &mock.KeyGenMock{} cryptoComponents.TxKeyGen = &mock.KeyGenMock{} - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = integrationTests.TestMarshalizer coreComponents.TxMarshalizerField = integrationTests.TestMarshalizer coreComponents.HasherField = integrationTests.TestHasher @@ -231,12 +235,17 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui 
coreComponents.ChanStopNodeProcessField = endProcess.GetDummyEndProcessChannel() coreComponents.HardforkTriggerPubKeyField = []byte("provided hardfork pub key") + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &marshallerMock.MarshalizerMock{}, + 444, + ) argsBootstrapHandler := bootstrap.ArgsEpochStartBootstrap{ - CryptoComponentsHolder: cryptoComponents, - CoreComponentsHolder: coreComponents, - MainMessenger: nodeToJoinLate.MainMessenger, - FullArchiveMessenger: nodeToJoinLate.FullArchiveMessenger, - GeneralConfig: generalConfig, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, + CryptoComponentsHolder: cryptoComponents, + CoreComponentsHolder: coreComponents, + MainMessenger: nodeToJoinLate.MainMessenger, + FullArchiveMessenger: nodeToJoinLate.FullArchiveMessenger, + GeneralConfig: generalConfig, PrefsConfig: config.PreferencesConfig{ FullArchive: false, }, diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index d2881004d0e..81c9e4652f4 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -21,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" vmFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/dblookupext" @@ -387,7 +388,7 @@ func hardForkImport( defaults.FillGasMapInternal(gasSchedule, 1) log.Warn("started import process") - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = integrationTests.TestMarshalizer coreComponents.TxMarshalizerField = integrationTests.TestMarshalizer coreComponents.HasherField = integrationTests.TestHasher @@ -406,8 +407,6 @@ func hardForkImport( dataComponents.DataPool = node.DataPool dataComponents.BlockChain = node.BlockChain - roundConfig := integrationTests.GetDefaultRoundsConfig() - argsGenesis := process.ArgsGenesisBlockCreator{ GenesisTime: 0, StartEpochNum: 100, @@ -424,6 +423,7 @@ func hardForkImport( WasmVMVersions: []config.WasmVMVersionByEpoch{ {StartEpoch: 0, Version: "*"}, }, + TransferAndExecuteByUserAddresses: []string{"erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe3"}, }, HardForkConfig: config.HardforkConfig{ ImportFolder: node.ExportFolder, @@ -466,6 +466,8 @@ func hardForkImport( MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -476,11 +478,17 @@ func hardForkImport( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, AccountsParser: &genesisMocks.AccountsParserStub{}, SmartContractParser: &mock.SmartContractParserStub{}, BlockSignKeyGen: &mock.KeyGenMock{}, - EpochConfig: 
&config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: config.EnableEpochs{ BuiltInFunctionsEnableEpoch: 0, SCDeployEnableEpoch: 0, @@ -492,7 +500,8 @@ func hardForkImport( DelegationSmartContractEnableEpoch: 0, }, }, - RoundConfig: &roundConfig, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, } @@ -560,7 +569,7 @@ func createHardForkExporter( returnedConfigs[node.ShardCoordinator.SelfId()] = append(returnedConfigs[node.ShardCoordinator.SelfId()], exportConfig) returnedConfigs[node.ShardCoordinator.SelfId()] = append(returnedConfigs[node.ShardCoordinator.SelfId()], keysConfig) - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = integrationTests.TestMarshalizer coreComponents.TxMarshalizerField = integrationTests.TestTxSignMarshalizer coreComponents.HasherField = integrationTests.TestHasher diff --git a/integrationTests/multiShard/relayedTx/common.go b/integrationTests/multiShard/relayedTx/common.go index f875dbb6f8b..c2bc8e5995c 100644 --- a/integrationTests/multiShard/relayedTx/common.go +++ b/integrationTests/multiShard/relayedTx/common.go @@ -8,21 +8,37 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" ) // CreateGeneralSetupForRelayTxTest will create the general setup for relayed transactions -func CreateGeneralSetupForRelayTxTest() ([]*integrationTests.TestProcessorNode, []int, []*integrationTests.TestWalletAccount, *integrationTests.TestWalletAccount) { +func CreateGeneralSetupForRelayTxTest(relayedV3Test bool) ([]*integrationTests.TestProcessorNode, []int, []*integrationTests.TestWalletAccount, *integrationTests.TestWalletAccount) { + initialVal := big.NewInt(10000000000) + epochsConfig := integrationTests.GetDefaultEnableEpochsConfig() + if !relayedV3Test { + epochsConfig.RelayedTransactionsV3EnableEpoch = integrationTests.UnreachableEpoch + epochsConfig.FixRelayedBaseCostEnableEpoch = integrationTests.UnreachableEpoch + } + nodes, idxProposers := createAndMintNodes(initialVal, epochsConfig) + + players, relayerAccount := createAndMintPlayers(relayedV3Test, nodes, initialVal) + + return nodes, idxProposers, players, relayerAccount +} + +func createAndMintNodes(initialVal *big.Int, enableEpochsConfig *config.EnableEpochs) ([]*integrationTests.TestProcessorNode, []int) { numOfShards := 2 nodesPerShard := 2 numMetachainNodes := 1 - nodes := integrationTests.CreateNodes( + nodes := integrationTests.CreateNodesWithEnableEpochsConfig( numOfShards, nodesPerShard, numMetachainNodes, + enableEpochsConfig, ) idxProposers := make([]int, numOfShards+1) @@ -33,21 +49,32 @@ func CreateGeneralSetupForRelayTxTest() ([]*integrationTests.TestProcessorNode, integrationTests.DisplayAndStartNodes(nodes) - initialVal := big.NewInt(1000000000) integrationTests.MintAllNodes(nodes, initialVal) + return nodes, idxProposers +} 
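+// A relayed v3 test can build on this setup roughly as follows (illustrative sketch only, using the helpers defined in this file):
+//
+//	nodes, idxProposers, players, relayer := CreateGeneralSetupForRelayTxTest(true)
+//	relayedTx, userTx := CreateAndSendRelayedAndUserTxV3(nodes, relayer, players[0], players[1].Address, big.NewInt(100), integrationTests.MinTxGasLimit, []byte(""))
+//	// ... then propose and sync a few blocks and assert on the relayedTx / userTx outcomes.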
+ +func createAndMintPlayers( + intraShard bool, + nodes []*integrationTests.TestProcessorNode, + initialVal *big.Int, +) ([]*integrationTests.TestWalletAccount, *integrationTests.TestWalletAccount) { + relayerShard := uint32(0) numPlayers := 5 numShards := nodes[0].ShardCoordinator.NumberOfShards() players := make([]*integrationTests.TestWalletAccount, numPlayers) for i := 0; i < numPlayers; i++ { shardId := uint32(i) % numShards + if intraShard { + shardId = relayerShard + } players[i] = integrationTests.CreateTestWalletAccount(nodes[0].ShardCoordinator, shardId) } - relayerAccount := integrationTests.CreateTestWalletAccount(nodes[0].ShardCoordinator, 0) + relayerAccount := integrationTests.CreateTestWalletAccount(nodes[0].ShardCoordinator, relayerShard) integrationTests.MintAllPlayers(nodes, []*integrationTests.TestWalletAccount{relayerAccount}, initialVal) - return nodes, idxProposers, players, relayerAccount + return players, relayerAccount } // CreateAndSendRelayedAndUserTx will create and send a relayed user transaction @@ -59,10 +86,10 @@ func CreateAndSendRelayedAndUserTx( value *big.Int, gasLimit uint64, txData []byte, -) *transaction.Transaction { +) (*transaction.Transaction, *transaction.Transaction) { txDispatcherNode := getNodeWithinSameShardAsPlayer(nodes, relayer.Address) - userTx := createUserTx(player, rcvAddr, value, gasLimit, txData) + userTx := createUserTx(player, rcvAddr, value, gasLimit, txData, nil) relayedTx := createRelayedTx(txDispatcherNode.EconomicsData, relayer, userTx) _, err := txDispatcherNode.SendTransaction(relayedTx) @@ -70,7 +97,7 @@ func CreateAndSendRelayedAndUserTx( fmt.Println(err.Error()) } - return relayedTx + return relayedTx, userTx } // CreateAndSendRelayedAndUserTxV2 will create and send a relayed user transaction for relayed v2 @@ -82,10 +109,10 @@ func CreateAndSendRelayedAndUserTxV2( value *big.Int, gasLimit uint64, txData []byte, -) *transaction.Transaction { +) (*transaction.Transaction, *transaction.Transaction) { txDispatcherNode := getNodeWithinSameShardAsPlayer(nodes, relayer.Address) - userTx := createUserTx(player, rcvAddr, value, 0, txData) + userTx := createUserTx(player, rcvAddr, value, 0, txData, nil) relayedTx := createRelayedTxV2(txDispatcherNode.EconomicsData, relayer, userTx, gasLimit) _, err := txDispatcherNode.SendTransaction(relayedTx) @@ -93,7 +120,30 @@ func CreateAndSendRelayedAndUserTxV2( fmt.Println(err.Error()) } - return relayedTx + return relayedTx, userTx +} + +// CreateAndSendRelayedAndUserTxV3 will create and send a relayed user transaction for relayed v3 +func CreateAndSendRelayedAndUserTxV3( + nodes []*integrationTests.TestProcessorNode, + relayer *integrationTests.TestWalletAccount, + player *integrationTests.TestWalletAccount, + rcvAddr []byte, + value *big.Int, + gasLimit uint64, + txData []byte, +) (*transaction.Transaction, *transaction.Transaction) { + txDispatcherNode := getNodeWithinSameShardAsPlayer(nodes, relayer.Address) + + userTx := createUserTx(player, rcvAddr, value, gasLimit, txData, relayer.Address) + relayedTx := createRelayedTxV3(txDispatcherNode.EconomicsData, relayer, userTx) + + _, err := txDispatcherNode.SendTransaction(relayedTx) + if err != nil { + fmt.Println(err.Error()) + } + + return relayedTx, userTx } func createUserTx( @@ -102,21 +152,24 @@ func createUserTx( value *big.Int, gasLimit uint64, txData []byte, + relayerAddress []byte, ) *transaction.Transaction { tx := &transaction.Transaction{ - Nonce: player.Nonce, - Value: big.NewInt(0).Set(value), - RcvAddr: rcvAddr, - 
SndAddr: player.Address, - GasPrice: integrationTests.MinTxGasPrice, - GasLimit: gasLimit, - Data: txData, - ChainID: integrationTests.ChainID, - Version: integrationTests.MinTransactionVersion, + Nonce: player.Nonce, + Value: big.NewInt(0).Set(value), + RcvAddr: rcvAddr, + SndAddr: player.Address, + GasPrice: integrationTests.MinTxGasPrice, + GasLimit: gasLimit, + Data: txData, + ChainID: integrationTests.ChainID, + Version: integrationTests.MinTransactionVersion, + RelayerAddr: relayerAddress, } txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer, integrationTests.TestTxSignHasher) tx.Signature, _ = player.SingleSigner.Sign(player.SkTxSign, txBuff) player.Nonce++ + player.Balance.Sub(player.Balance, value) return tx } @@ -130,7 +183,7 @@ func createRelayedTx( txData := core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshaled) tx := &transaction.Transaction{ Nonce: relayer.Nonce, - Value: big.NewInt(0).Set(userTx.Value), + Value: big.NewInt(0), RcvAddr: userTx.SndAddr, SndAddr: relayer.Address, GasPrice: integrationTests.MinTxGasPrice, @@ -144,9 +197,11 @@ func createRelayedTx( txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer, integrationTests.TestTxSignHasher) tx.Signature, _ = relayer.SingleSigner.Sign(relayer.SkTxSign, txBuff) relayer.Nonce++ + + relayer.Balance.Sub(relayer.Balance, tx.Value) + txFee := economicsFee.ComputeTxFee(tx) relayer.Balance.Sub(relayer.Balance, txFee) - relayer.Balance.Sub(relayer.Balance, tx.Value) return tx } @@ -173,10 +228,43 @@ func createRelayedTxV2( txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer, integrationTests.TestTxSignHasher) tx.Signature, _ = relayer.SingleSigner.Sign(relayer.SkTxSign, txBuff) relayer.Nonce++ + + relayer.Balance.Sub(relayer.Balance, tx.Value) + txFee := economicsFee.ComputeTxFee(tx) relayer.Balance.Sub(relayer.Balance, txFee) + + return tx +} + +func createRelayedTxV3( + economicsFee process.FeeHandler, + relayer *integrationTests.TestWalletAccount, + userTx *transaction.Transaction, +) *transaction.Transaction { + tx := &transaction.Transaction{ + Nonce: relayer.Nonce, + Value: big.NewInt(0), + RcvAddr: relayer.Address, + SndAddr: relayer.Address, + GasPrice: integrationTests.MinTxGasPrice, + Data: []byte(""), + ChainID: userTx.ChainID, + Version: userTx.Version, + InnerTransactions: []*transaction.Transaction{userTx}, + } + gasLimit := economicsFee.ComputeGasLimit(tx) + tx.GasLimit = userTx.GasLimit + gasLimit + + txBuff, _ := tx.GetDataForSigning(integrationTests.TestAddressPubkeyConverter, integrationTests.TestTxSignMarshalizer, integrationTests.TestTxSignHasher) + tx.Signature, _ = relayer.SingleSigner.Sign(relayer.SkTxSign, txBuff) + relayer.Nonce++ + relayer.Balance.Sub(relayer.Balance, tx.Value) + txFee := economicsFee.ComputeTxFee(tx) + relayer.Balance.Sub(relayer.Balance, txFee) + return tx } @@ -190,7 +278,7 @@ func createAndSendSimpleTransaction( ) { txDispatcherNode := getNodeWithinSameShardAsPlayer(nodes, player.Address) - userTx := createUserTx(player, rcvAddr, value, gasLimit, txData) + userTx := createUserTx(player, rcvAddr, value, gasLimit, txData, nil) _, err := txDispatcherNode.SendTransaction(userTx) if err != nil { fmt.Println(err.Error()) diff --git a/integrationTests/multiShard/relayedTx/edgecases/edgecases_test.go b/integrationTests/multiShard/relayedTx/edgecases/edgecases_test.go index 
246a81fbe15..72e7bafda2e 100644 --- a/integrationTests/multiShard/relayedTx/edgecases/edgecases_test.go +++ b/integrationTests/multiShard/relayedTx/edgecases/edgecases_test.go @@ -16,7 +16,7 @@ func TestRelayedTransactionInMultiShardEnvironmentWithNormalTxButWrongNonceShoul t.Skip("this is not a short test") } - nodes, idxProposers, players, relayer := relayedTx.CreateGeneralSetupForRelayTxTest() + nodes, idxProposers, players, relayer := relayedTx.CreateGeneralSetupForRelayTxTest(false) defer func() { for _, n := range nodes { n.Close() @@ -32,18 +32,12 @@ func TestRelayedTransactionInMultiShardEnvironmentWithNormalTxButWrongNonceShoul receiverAddress1 := []byte("12345678901234567890123456789012") receiverAddress2 := []byte("12345678901234567890123456789011") - totalFees := big.NewInt(0) - relayerInitialValue := big.NewInt(0).Set(relayer.Balance) nrRoundsToTest := int64(5) for i := int64(0); i < nrRoundsToTest; i++ { for _, player := range players { player.Nonce += 1 - relayerTx := relayedTx.CreateAndSendRelayedAndUserTx(nodes, relayer, player, receiverAddress1, sendValue, integrationTests.MinTxGasLimit, []byte("")) - totalFee := nodes[0].EconomicsData.ComputeTxFee(relayerTx) - totalFees.Add(totalFees, totalFee) - relayerTx = relayedTx.CreateAndSendRelayedAndUserTx(nodes, relayer, player, receiverAddress2, sendValue, integrationTests.MinTxGasLimit, []byte("")) - totalFee = nodes[0].EconomicsData.ComputeTxFee(relayerTx) - totalFees.Add(totalFees, totalFee) + _, _ = relayedTx.CreateAndSendRelayedAndUserTx(nodes, relayer, player, receiverAddress1, sendValue, integrationTests.MinTxGasLimit, []byte("")) + _, _ = relayedTx.CreateAndSendRelayedAndUserTx(nodes, relayer, player, receiverAddress2, sendValue, integrationTests.MinTxGasLimit, []byte("")) } round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) @@ -71,9 +65,8 @@ func TestRelayedTransactionInMultiShardEnvironmentWithNormalTxButWrongNonceShoul assert.Equal(t, uint64(0), account.GetNonce()) } - expectedBalance := big.NewInt(0).Sub(relayerInitialValue, totalFees) relayerAccount := relayedTx.GetUserAccount(nodes, relayer.Address) - assert.True(t, relayerAccount.GetBalance().Cmp(expectedBalance) == 0) + assert.True(t, relayerAccount.GetBalance().Cmp(relayer.Balance) == 0) } func TestRelayedTransactionInMultiShardEnvironmentWithNormalTxButWithTooMuchGas(t *testing.T) { @@ -81,7 +74,7 @@ func TestRelayedTransactionInMultiShardEnvironmentWithNormalTxButWithTooMuchGas( t.Skip("this is not a short test") } - nodes, idxProposers, players, relayer := relayedTx.CreateGeneralSetupForRelayTxTest() + nodes, idxProposers, players, relayer := relayedTx.CreateGeneralSetupForRelayTxTest(false) defer func() { for _, n := range nodes { n.Close() @@ -100,10 +93,16 @@ func TestRelayedTransactionInMultiShardEnvironmentWithNormalTxButWithTooMuchGas( additionalGasLimit := uint64(100000) tooMuchGasLimit := integrationTests.MinTxGasLimit + additionalGasLimit nrRoundsToTest := int64(5) + + txsSentEachRound := big.NewInt(2) // 2 relayed txs each round + txsSentPerPlayer := big.NewInt(0).Mul(txsSentEachRound, big.NewInt(nrRoundsToTest)) + initialPlayerFunds := big.NewInt(0).Mul(sendValue, txsSentPerPlayer) + integrationTests.MintAllPlayers(nodes, players, initialPlayerFunds) + for i := int64(0); i < nrRoundsToTest; i++ { for _, player := range players { - _ = relayedTx.CreateAndSendRelayedAndUserTx(nodes, relayer, player, receiverAddress1, sendValue, tooMuchGasLimit, []byte("")) - _ = relayedTx.CreateAndSendRelayedAndUserTx(nodes, 
relayer, player, receiverAddress2, sendValue, tooMuchGasLimit, []byte("")) + _, _ = relayedTx.CreateAndSendRelayedAndUserTx(nodes, relayer, player, receiverAddress1, sendValue, tooMuchGasLimit, []byte("")) + _, _ = relayedTx.CreateAndSendRelayedAndUserTx(nodes, relayer, player, receiverAddress2, sendValue, tooMuchGasLimit, []byte("")) } round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) @@ -124,8 +123,8 @@ func TestRelayedTransactionInMultiShardEnvironmentWithNormalTxButWithTooMuchGas( finalBalance := big.NewInt(0).Mul(big.NewInt(int64(len(players))), big.NewInt(nrRoundsToTest)) finalBalance.Mul(finalBalance, sendValue) - assert.Equal(t, receiver1.GetBalance().Cmp(finalBalance), 0) - assert.Equal(t, receiver2.GetBalance().Cmp(finalBalance), 0) + assert.Equal(t, 0, receiver1.GetBalance().Cmp(finalBalance)) + assert.Equal(t, 0, receiver2.GetBalance().Cmp(finalBalance)) players = append(players, relayer) checkPlayerBalancesWithPenalization(t, nodes, players) @@ -139,7 +138,7 @@ func checkPlayerBalancesWithPenalization( for i := 0; i < len(players); i++ { userAcc := relayedTx.GetUserAccount(nodes, players[i].Address) - assert.Equal(t, userAcc.GetBalance().Cmp(players[i].Balance), 0) + assert.Equal(t, 0, userAcc.GetBalance().Cmp(players[i].Balance)) assert.Equal(t, userAcc.GetNonce(), players[i].Nonce) } } diff --git a/integrationTests/multiShard/relayedTx/relayedTxV2_test.go b/integrationTests/multiShard/relayedTx/relayedTxV2_test.go deleted file mode 100644 index 9e23eeac1aa..00000000000 --- a/integrationTests/multiShard/relayedTx/relayedTxV2_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package relayedTx - -import ( - "encoding/hex" - "math/big" - "testing" - "time" - - "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-go/integrationTests" - "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" - vmFactory "github.com/multiversx/mx-chain-go/process/factory" - "github.com/stretchr/testify/assert" -) - -func TestRelayedTransactionV2InMultiShardEnvironmentWithSmartContractTX(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - - nodes, idxProposers, players, relayer := CreateGeneralSetupForRelayTxTest() - defer func() { - for _, n := range nodes { - n.Close() - } - }() - - sendValue := big.NewInt(5) - round := uint64(0) - nonce := uint64(0) - round = integrationTests.IncrementAndPrintRound(round) - nonce++ - - receiverAddress1 := []byte("12345678901234567890123456789012") - receiverAddress2 := []byte("12345678901234567890123456789011") - - ownerNode := nodes[0] - initialSupply := "00" + hex.EncodeToString(big.NewInt(100000000000).Bytes()) - scCode := wasm.GetSCCode("../../vm/wasm/testdata/erc20-c-03/wrc20_wasm.wasm") - scAddress, _ := ownerNode.BlockchainHook.NewAddress(ownerNode.OwnAccount.Address, ownerNode.OwnAccount.Nonce, vmFactory.WasmVirtualMachine) - - integrationTests.CreateAndSendTransactionWithGasLimit( - nodes[0], - big.NewInt(0), - 20000, - make([]byte, 32), - []byte(wasm.CreateDeployTxData(scCode)+"@"+initialSupply), - integrationTests.ChainID, - integrationTests.MinTransactionVersion, - ) - - transferTokenVMGas := uint64(7200) - transferTokenBaseGas := ownerNode.EconomicsData.ComputeGasLimit(&transaction.Transaction{Data: []byte("transferToken@" + hex.EncodeToString(receiverAddress1) + "@00" + hex.EncodeToString(sendValue.Bytes()))}) - transferTokenFullGas := transferTokenBaseGas + 
transferTokenVMGas - - initialTokenSupply := big.NewInt(1000000000) - initialPlusForGas := uint64(1000) - for _, player := range players { - integrationTests.CreateAndSendTransactionWithGasLimit( - ownerNode, - big.NewInt(0), - transferTokenFullGas+initialPlusForGas, - scAddress, - []byte("transferToken@"+hex.EncodeToString(player.Address)+"@00"+hex.EncodeToString(initialTokenSupply.Bytes())), - integrationTests.ChainID, - integrationTests.MinTransactionVersion, - ) - } - time.Sleep(time.Second) - - nrRoundsToTest := int64(5) - for i := int64(0); i < nrRoundsToTest; i++ { - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) - - for _, player := range players { - _ = CreateAndSendRelayedAndUserTxV2(nodes, relayer, player, scAddress, big.NewInt(0), - transferTokenFullGas, []byte("transferToken@"+hex.EncodeToString(receiverAddress1)+"@00"+hex.EncodeToString(sendValue.Bytes()))) - _ = CreateAndSendRelayedAndUserTxV2(nodes, relayer, player, scAddress, big.NewInt(0), - transferTokenFullGas, []byte("transferToken@"+hex.EncodeToString(receiverAddress2)+"@00"+hex.EncodeToString(sendValue.Bytes()))) - } - - time.Sleep(integrationTests.StepDelay) - } - - roundToPropagateMultiShard := int64(20) - for i := int64(0); i <= roundToPropagateMultiShard; i++ { - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) - } - - time.Sleep(time.Second) - - finalBalance := big.NewInt(0).Mul(big.NewInt(int64(len(players))), big.NewInt(nrRoundsToTest)) - finalBalance.Mul(finalBalance, sendValue) - - checkSCBalance(t, ownerNode, scAddress, receiverAddress1, finalBalance) - checkSCBalance(t, ownerNode, scAddress, receiverAddress1, finalBalance) - - checkPlayerBalances(t, nodes, players) - - userAcc := GetUserAccount(nodes, relayer.Address) - assert.Equal(t, 1, userAcc.GetBalance().Cmp(relayer.Balance)) -} diff --git a/integrationTests/multiShard/relayedTx/relayedTx_test.go b/integrationTests/multiShard/relayedTx/relayedTx_test.go index 9ca8c5a6d34..d9ea772d7ba 100644 --- a/integrationTests/multiShard/relayedTx/relayedTx_test.go +++ b/integrationTests/multiShard/relayedTx/relayedTx_test.go @@ -21,331 +21,395 @@ import ( "github.com/stretchr/testify/require" ) +type createAndSendRelayedAndUserTxFuncType = func( + nodes []*integrationTests.TestProcessorNode, + relayer *integrationTests.TestWalletAccount, + player *integrationTests.TestWalletAccount, + rcvAddr []byte, + value *big.Int, + gasLimit uint64, + txData []byte, +) (*transaction.Transaction, *transaction.Transaction) + func TestRelayedTransactionInMultiShardEnvironmentWithNormalTx(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } + t.Run("relayed v1", testRelayedTransactionInMultiShardEnvironmentWithNormalTx(CreateAndSendRelayedAndUserTx, false)) + t.Run("relayed v3", testRelayedTransactionInMultiShardEnvironmentWithNormalTx(CreateAndSendRelayedAndUserTxV3, true)) +} - nodes, idxProposers, players, relayer := CreateGeneralSetupForRelayTxTest() - defer func() { - for _, n := range nodes { - n.Close() - } - }() +func TestRelayedTransactionInMultiShardEnvironmentWithSmartContractTX(t *testing.T) { + t.Run("relayed v1", testRelayedTransactionInMultiShardEnvironmentWithSmartContractTX(CreateAndSendRelayedAndUserTx, false)) + t.Run("relayed v2", 
testRelayedTransactionInMultiShardEnvironmentWithSmartContractTX(CreateAndSendRelayedAndUserTxV2, false)) + t.Run("relayed v3", testRelayedTransactionInMultiShardEnvironmentWithSmartContractTX(CreateAndSendRelayedAndUserTxV3, true)) +} - sendValue := big.NewInt(5) - round := uint64(0) - nonce := uint64(0) - round = integrationTests.IncrementAndPrintRound(round) - nonce++ +func TestRelayedTransactionInMultiShardEnvironmentWithESDTTX(t *testing.T) { + t.Run("relayed v1", testRelayedTransactionInMultiShardEnvironmentWithESDTTX(CreateAndSendRelayedAndUserTx, false)) + t.Run("relayed v2", testRelayedTransactionInMultiShardEnvironmentWithESDTTX(CreateAndSendRelayedAndUserTxV2, false)) + t.Run("relayed v3", testRelayedTransactionInMultiShardEnvironmentWithESDTTX(CreateAndSendRelayedAndUserTxV3, true)) +} - receiverAddress1 := []byte("12345678901234567890123456789012") - receiverAddress2 := []byte("12345678901234567890123456789011") +func TestRelayedTransactionInMultiShardEnvironmentWithAttestationContract(t *testing.T) { + t.Run("relayed v1", testRelayedTransactionInMultiShardEnvironmentWithAttestationContract(CreateAndSendRelayedAndUserTx, false)) + t.Run("relayed v3", testRelayedTransactionInMultiShardEnvironmentWithAttestationContract(CreateAndSendRelayedAndUserTxV3, true)) +} - nrRoundsToTest := int64(5) - for i := int64(0); i < nrRoundsToTest; i++ { - for _, player := range players { - _ = CreateAndSendRelayedAndUserTx(nodes, relayer, player, receiverAddress1, sendValue, integrationTests.MinTxGasLimit, []byte("")) - _ = CreateAndSendRelayedAndUserTx(nodes, relayer, player, receiverAddress2, sendValue, integrationTests.MinTxGasLimit, []byte("")) +func testRelayedTransactionInMultiShardEnvironmentWithNormalTx( + createAndSendRelayedAndUserTxFunc createAndSendRelayedAndUserTxFuncType, + relayedV3Test bool, +) func(t *testing.T) { + return func(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") } - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + nodes, idxProposers, players, relayer := CreateGeneralSetupForRelayTxTest(relayedV3Test) + defer func() { + for _, n := range nodes { + n.Close() + } + }() - time.Sleep(integrationTests.StepDelay) - } + sendValue := big.NewInt(5) + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ - roundToPropagateMultiShard := int64(20) - for i := int64(0); i <= roundToPropagateMultiShard; i++ { - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) - } + receiverAddress1 := []byte("12345678901234567890123456789012") + receiverAddress2 := []byte("12345678901234567890123456789011") - time.Sleep(time.Second) - receiver1 := GetUserAccount(nodes, receiverAddress1) - receiver2 := GetUserAccount(nodes, receiverAddress2) + nrRoundsToTest := int64(5) - finalBalance := big.NewInt(0).Mul(big.NewInt(int64(len(players))), big.NewInt(nrRoundsToTest)) - finalBalance.Mul(finalBalance, sendValue) - assert.Equal(t, receiver1.GetBalance().Cmp(finalBalance), 0) - assert.Equal(t, receiver2.GetBalance().Cmp(finalBalance), 0) + txsSentEachRound := big.NewInt(2) // 2 relayed txs each round + txsSentPerPlayer := big.NewInt(0).Mul(txsSentEachRound, big.NewInt(nrRoundsToTest)) + initialPlayerFunds := big.NewInt(0).Mul(sendValue, txsSentPerPlayer) + integrationTests.MintAllPlayers(nodes, players, 
initialPlayerFunds) - players = append(players, relayer) - checkPlayerBalances(t, nodes, players) -} + for i := int64(0); i < nrRoundsToTest; i++ { + for _, player := range players { + _, _ = createAndSendRelayedAndUserTxFunc(nodes, relayer, player, receiverAddress1, sendValue, integrationTests.MinTxGasLimit, []byte("")) + _, _ = createAndSendRelayedAndUserTxFunc(nodes, relayer, player, receiverAddress2, sendValue, integrationTests.MinTxGasLimit, []byte("")) + } -func TestRelayedTransactionInMultiShardEnvironmentWithSmartContractTX(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + + time.Sleep(integrationTests.StepDelay) + } + + roundToPropagateMultiShard := int64(20) + for i := int64(0); i <= roundToPropagateMultiShard; i++ { + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + } + + time.Sleep(time.Second) + receiver1 := GetUserAccount(nodes, receiverAddress1) + receiver2 := GetUserAccount(nodes, receiverAddress2) + + finalBalance := big.NewInt(0).Mul(big.NewInt(int64(len(players))), big.NewInt(nrRoundsToTest)) + finalBalance.Mul(finalBalance, sendValue) + assert.Equal(t, receiver1.GetBalance().Cmp(finalBalance), 0) + assert.Equal(t, receiver2.GetBalance().Cmp(finalBalance), 0) + + players = append(players, relayer) + checkPlayerBalances(t, nodes, players) } +} - nodes, idxProposers, players, relayer := CreateGeneralSetupForRelayTxTest() - defer func() { - for _, n := range nodes { - n.Close() +func testRelayedTransactionInMultiShardEnvironmentWithSmartContractTX( + createAndSendRelayedAndUserTxFunc createAndSendRelayedAndUserTxFuncType, + relayedV3Test bool, +) func(t *testing.T) { + return func(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") } - }() - - sendValue := big.NewInt(5) - round := uint64(0) - nonce := uint64(0) - round = integrationTests.IncrementAndPrintRound(round) - nonce++ - - receiverAddress1 := []byte("12345678901234567890123456789012") - receiverAddress2 := []byte("12345678901234567890123456789011") - - ownerNode := nodes[0] - initialSupply := "00" + hex.EncodeToString(big.NewInt(100000000000).Bytes()) - scCode := wasm.GetSCCode("../../vm/wasm/testdata/erc20-c-03/wrc20_wasm.wasm") - scAddress, _ := ownerNode.BlockchainHook.NewAddress(ownerNode.OwnAccount.Address, ownerNode.OwnAccount.Nonce, vmFactory.WasmVirtualMachine) - - integrationTests.CreateAndSendTransactionWithGasLimit( - nodes[0], - big.NewInt(0), - 20000, - make([]byte, 32), - []byte(wasm.CreateDeployTxData(scCode)+"@"+initialSupply), - integrationTests.ChainID, - integrationTests.MinTransactionVersion, - ) - - transferTokenVMGas := uint64(7200) - transferTokenBaseGas := ownerNode.EconomicsData.ComputeGasLimit(&transaction.Transaction{Data: []byte("transferToken@" + hex.EncodeToString(receiverAddress1) + "@00" + hex.EncodeToString(sendValue.Bytes()))}) - transferTokenFullGas := transferTokenBaseGas + transferTokenVMGas - - initialTokenSupply := big.NewInt(1000000000) - initialPlusForGas := uint64(1000) - for _, player := range players { + + nodes, idxProposers, players, relayer := CreateGeneralSetupForRelayTxTest(relayedV3Test) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + sendValue := big.NewInt(5) + round := uint64(0) + nonce := uint64(0) + round = 
integrationTests.IncrementAndPrintRound(round) + nonce++ + + receiverAddress1 := []byte("12345678901234567890123456789012") + receiverAddress2 := []byte("12345678901234567890123456789011") + + ownerNode := nodes[0] + initialSupply := "00" + hex.EncodeToString(big.NewInt(100000000000).Bytes()) + scCode := wasm.GetSCCode("../../vm/wasm/testdata/erc20-c-03/wrc20_wasm.wasm") + scAddress, _ := ownerNode.BlockchainHook.NewAddress(ownerNode.OwnAccount.Address, ownerNode.OwnAccount.Nonce, vmFactory.WasmVirtualMachine) + integrationTests.CreateAndSendTransactionWithGasLimit( - ownerNode, + nodes[0], big.NewInt(0), - transferTokenFullGas+initialPlusForGas, - scAddress, - []byte("transferToken@"+hex.EncodeToString(player.Address)+"@00"+hex.EncodeToString(initialTokenSupply.Bytes())), + 200000, + make([]byte, 32), + []byte(wasm.CreateDeployTxData(scCode)+"@"+initialSupply), integrationTests.ChainID, integrationTests.MinTransactionVersion, ) - } - time.Sleep(time.Second) - nrRoundsToTest := int64(5) - for i := int64(0); i < nrRoundsToTest; i++ { - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + transferTokenVMGas := uint64(720000) + transferTokenBaseGas := ownerNode.EconomicsData.ComputeGasLimit(&transaction.Transaction{Data: []byte("transferToken@" + hex.EncodeToString(receiverAddress1) + "@00" + hex.EncodeToString(sendValue.Bytes()))}) + transferTokenFullGas := transferTokenBaseGas + transferTokenVMGas + initialTokenSupply := big.NewInt(1000000000) + initialPlusForGas := uint64(100000) for _, player := range players { - _ = CreateAndSendRelayedAndUserTx(nodes, relayer, player, scAddress, big.NewInt(0), - transferTokenFullGas, []byte("transferToken@"+hex.EncodeToString(receiverAddress1)+"@00"+hex.EncodeToString(sendValue.Bytes()))) - _ = CreateAndSendRelayedAndUserTx(nodes, relayer, player, scAddress, big.NewInt(0), - transferTokenFullGas, []byte("transferToken@"+hex.EncodeToString(receiverAddress2)+"@00"+hex.EncodeToString(sendValue.Bytes()))) + integrationTests.CreateAndSendTransactionWithGasLimit( + ownerNode, + big.NewInt(0), + transferTokenFullGas+initialPlusForGas, + scAddress, + []byte("transferToken@"+hex.EncodeToString(player.Address)+"@00"+hex.EncodeToString(initialTokenSupply.Bytes())), + integrationTests.ChainID, + integrationTests.MinTransactionVersion, + ) } + time.Sleep(time.Second) - time.Sleep(integrationTests.StepDelay) - } + nrRoundsToTest := int64(5) + for i := int64(0); i < nrRoundsToTest; i++ { + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) - roundToPropagateMultiShard := int64(20) - for i := int64(0); i <= roundToPropagateMultiShard; i++ { - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) - } + for _, player := range players { + _, _ = createAndSendRelayedAndUserTxFunc(nodes, relayer, player, scAddress, big.NewInt(0), + transferTokenFullGas, []byte("transferToken@"+hex.EncodeToString(receiverAddress1)+"@00"+hex.EncodeToString(sendValue.Bytes()))) + _, _ = createAndSendRelayedAndUserTxFunc(nodes, relayer, player, scAddress, big.NewInt(0), + transferTokenFullGas, []byte("transferToken@"+hex.EncodeToString(receiverAddress2)+"@00"+hex.EncodeToString(sendValue.Bytes()))) + } + + time.Sleep(integrationTests.StepDelay) + } + time.Sleep(time.Second) - 
time.Sleep(time.Second) + roundToPropagateMultiShard := int64(25) + for i := int64(0); i <= roundToPropagateMultiShard; i++ { + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + } - finalBalance := big.NewInt(0).Mul(big.NewInt(int64(len(players))), big.NewInt(nrRoundsToTest)) - finalBalance.Mul(finalBalance, sendValue) + time.Sleep(time.Second) - checkSCBalance(t, ownerNode, scAddress, receiverAddress1, finalBalance) - checkSCBalance(t, ownerNode, scAddress, receiverAddress1, finalBalance) + finalBalance := big.NewInt(0).Mul(big.NewInt(int64(len(players))), big.NewInt(nrRoundsToTest)) + finalBalance.Mul(finalBalance, sendValue) - checkPlayerBalances(t, nodes, players) + checkSCBalance(t, ownerNode, scAddress, receiverAddress1, finalBalance) + checkSCBalance(t, ownerNode, scAddress, receiverAddress1, finalBalance) - userAcc := GetUserAccount(nodes, relayer.Address) - assert.Equal(t, userAcc.GetBalance().Cmp(relayer.Balance), 1) -} + checkPlayerBalances(t, nodes, players) -func TestRelayedTransactionInMultiShardEnvironmentWithESDTTX(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") + userAcc := GetUserAccount(nodes, relayer.Address) + assert.Equal(t, 1, userAcc.GetBalance().Cmp(relayer.Balance)) } +} - nodes, idxProposers, players, relayer := CreateGeneralSetupForRelayTxTest() - defer func() { - for _, n := range nodes { - n.Close() +func testRelayedTransactionInMultiShardEnvironmentWithESDTTX( + createAndSendRelayedAndUserTxFunc createAndSendRelayedAndUserTxFuncType, + relayedV3Test bool, +) func(t *testing.T) { + return func(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") } - }() - - sendValue := big.NewInt(5) - round := uint64(0) - nonce := uint64(0) - round = integrationTests.IncrementAndPrintRound(round) - nonce++ - - receiverAddress1 := []byte("12345678901234567890123456789012") - receiverAddress2 := []byte("12345678901234567890123456789011") - - // ------- send token issue - issuePrice := big.NewInt(1000) - initalSupply := big.NewInt(10000000000) - tokenIssuer := nodes[0] - txData := "issue" + - "@" + hex.EncodeToString([]byte("robertWhyNot")) + - "@" + hex.EncodeToString([]byte("RBT")) + - "@" + hex.EncodeToString(initalSupply.Bytes()) + - "@" + hex.EncodeToString([]byte{6}) - integrationTests.CreateAndSendTransaction(tokenIssuer, nodes, issuePrice, vm.ESDTSCAddress, txData, core.MinMetaTxExtraGasCost) - - time.Sleep(time.Second) - nrRoundsToPropagateMultiShard := int64(10) - for i := int64(0); i < nrRoundsToPropagateMultiShard; i++ { - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) - time.Sleep(integrationTests.StepDelay) - } - time.Sleep(time.Second) - tokenIdenfitifer := string(integrationTests.GetTokenIdentifier(nodes, []byte("RBT"))) - CheckAddressHasTokens(t, tokenIssuer.OwnAccount.Address, nodes, tokenIdenfitifer, initalSupply) + nodes, idxProposers, players, relayer := CreateGeneralSetupForRelayTxTest(relayedV3Test) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + sendValue := big.NewInt(5) + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + receiverAddress1 := []byte("12345678901234567890123456789012") + receiverAddress2 := []byte("12345678901234567890123456789011") + + // ------- send token issue + issuePrice := big.NewInt(1000) + 
initalSupply := big.NewInt(10000000000) + tokenIssuer := nodes[0] + txData := "issue" + + "@" + hex.EncodeToString([]byte("robertWhyNot")) + + "@" + hex.EncodeToString([]byte("RBT")) + + "@" + hex.EncodeToString(initalSupply.Bytes()) + + "@" + hex.EncodeToString([]byte{6}) + integrationTests.CreateAndSendTransaction(tokenIssuer, nodes, issuePrice, vm.ESDTSCAddress, txData, core.MinMetaTxExtraGasCost) + + time.Sleep(time.Second) + nrRoundsToPropagateMultiShard := int64(10) + for i := int64(0); i < nrRoundsToPropagateMultiShard; i++ { + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + time.Sleep(integrationTests.StepDelay) + } + time.Sleep(time.Second) - // ------ send tx to players - valueToTopUp := big.NewInt(100000000) - txData = core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString([]byte(tokenIdenfitifer)) + "@" + hex.EncodeToString(valueToTopUp.Bytes()) - for _, player := range players { - integrationTests.CreateAndSendTransaction(tokenIssuer, nodes, big.NewInt(0), player.Address, txData, integrationTests.AdditionalGasLimit) - } + tokenIdenfitifer := string(integrationTests.GetTokenIdentifier(nodes, []byte("RBT"))) + CheckAddressHasTokens(t, tokenIssuer.OwnAccount.Address, nodes, tokenIdenfitifer, initalSupply) - time.Sleep(time.Second) - for i := int64(0); i < nrRoundsToPropagateMultiShard; i++ { - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) - time.Sleep(integrationTests.StepDelay) - } - time.Sleep(time.Second) - - txData = core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString([]byte(tokenIdenfitifer)) + "@" + hex.EncodeToString(sendValue.Bytes()) - transferTokenESDTGas := uint64(1) - transferTokenBaseGas := tokenIssuer.EconomicsData.ComputeGasLimit(&transaction.Transaction{Data: []byte(txData)}) - transferTokenFullGas := transferTokenBaseGas + transferTokenESDTGas + uint64(100) // use more gas to simulate gas refund - nrRoundsToTest := int64(5) - for i := int64(0); i < nrRoundsToTest; i++ { + // ------ send tx to players + valueToTopUp := big.NewInt(100000000) + txData = core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString([]byte(tokenIdenfitifer)) + "@" + hex.EncodeToString(valueToTopUp.Bytes()) for _, player := range players { - _ = CreateAndSendRelayedAndUserTx(nodes, relayer, player, receiverAddress1, big.NewInt(0), transferTokenFullGas, []byte(txData)) - _ = CreateAndSendRelayedAndUserTx(nodes, relayer, player, receiverAddress2, big.NewInt(0), transferTokenFullGas, []byte(txData)) + integrationTests.CreateAndSendTransaction(tokenIssuer, nodes, big.NewInt(0), player.Address, txData, integrationTests.AdditionalGasLimit) } - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + time.Sleep(time.Second) + for i := int64(0); i < nrRoundsToPropagateMultiShard; i++ { + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + time.Sleep(integrationTests.StepDelay) + } + time.Sleep(time.Second) + + txData = core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString([]byte(tokenIdenfitifer)) + "@" + hex.EncodeToString(sendValue.Bytes()) + transferTokenESDTGas := uint64(1) + transferTokenBaseGas := 
tokenIssuer.EconomicsData.ComputeGasLimit(&transaction.Transaction{Data: []byte(txData)}) + transferTokenFullGas := transferTokenBaseGas + transferTokenESDTGas + uint64(100) // use more gas to simulate gas refund + nrRoundsToTest := int64(5) + for i := int64(0); i < nrRoundsToTest; i++ { + for _, player := range players { + _, _ = createAndSendRelayedAndUserTxFunc(nodes, relayer, player, receiverAddress1, big.NewInt(0), transferTokenFullGas, []byte(txData)) + _, _ = createAndSendRelayedAndUserTxFunc(nodes, relayer, player, receiverAddress2, big.NewInt(0), transferTokenFullGas, []byte(txData)) + } + + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + + time.Sleep(integrationTests.StepDelay) + } - time.Sleep(integrationTests.StepDelay) - } + nrRoundsToPropagateMultiShard = int64(20) + for i := int64(0); i <= nrRoundsToPropagateMultiShard; i++ { + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + } - nrRoundsToPropagateMultiShard = int64(20) - for i := int64(0); i <= nrRoundsToPropagateMultiShard; i++ { - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + time.Sleep(time.Second) + finalBalance := big.NewInt(0).Mul(big.NewInt(int64(len(players))), big.NewInt(nrRoundsToTest)) + finalBalance.Mul(finalBalance, sendValue) + CheckAddressHasTokens(t, receiverAddress1, nodes, tokenIdenfitifer, finalBalance) + CheckAddressHasTokens(t, receiverAddress2, nodes, tokenIdenfitifer, finalBalance) + + players = append(players, relayer) + checkPlayerBalances(t, nodes, players) } +} - time.Sleep(time.Second) - finalBalance := big.NewInt(0).Mul(big.NewInt(int64(len(players))), big.NewInt(nrRoundsToTest)) - finalBalance.Mul(finalBalance, sendValue) - CheckAddressHasTokens(t, receiverAddress1, nodes, tokenIdenfitifer, finalBalance) - CheckAddressHasTokens(t, receiverAddress2, nodes, tokenIdenfitifer, finalBalance) +func testRelayedTransactionInMultiShardEnvironmentWithAttestationContract( + createAndSendRelayedAndUserTxFunc createAndSendRelayedAndUserTxFuncType, + relayedV3Test bool, +) func(t *testing.T) { + return func(t *testing.T) { - players = append(players, relayer) - checkPlayerBalances(t, nodes, players) -} + if testing.Short() { + t.Skip("this is not a short test") + } -func TestRelayedTransactionInMultiShardEnvironmentWithAttestationContract(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } + nodes, idxProposers, players, relayer := CreateGeneralSetupForRelayTxTest(relayedV3Test) + defer func() { + for _, n := range nodes { + n.Close() + } + }() - nodes, idxProposers, players, relayer := CreateGeneralSetupForRelayTxTest() - defer func() { - for _, n := range nodes { - n.Close() + for _, node := range nodes { + node.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) } - }() - for _, node := range nodes { - node.EconomicsData.SetMaxGasLimitPerBlock(1500000000, 0) - } + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ - round := uint64(0) - nonce := uint64(0) - round = integrationTests.IncrementAndPrintRound(round) - nonce++ - - ownerNode := nodes[0] - scCode := wasm.GetSCCode("attestation.wasm") - scAddress, _ := ownerNode.BlockchainHook.NewAddress(ownerNode.OwnAccount.Address, ownerNode.OwnAccount.Nonce, 
vmFactory.WasmVirtualMachine) - - registerValue := big.NewInt(100) - integrationTests.CreateAndSendTransactionWithGasLimit( - nodes[0], - big.NewInt(0), - 200000, - make([]byte, 32), - []byte(wasm.CreateDeployTxData(scCode)+"@"+hex.EncodeToString(registerValue.Bytes())+"@"+hex.EncodeToString(relayer.Address)+"@"+"ababab"), - integrationTests.ChainID, - integrationTests.MinTransactionVersion, - ) - time.Sleep(time.Second) - - registerVMGas := uint64(100000) - savePublicInfoVMGas := uint64(100000) - attestVMGas := uint64(100000) - - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) - - uniqueIDs := make([]string, len(players)) - for i, player := range players { - uniqueIDs[i] = core.UniqueIdentifier() - _ = CreateAndSendRelayedAndUserTx(nodes, relayer, player, scAddress, registerValue, - registerVMGas, []byte("register@"+hex.EncodeToString([]byte(uniqueIDs[i])))) - } - time.Sleep(time.Second) + ownerNode := nodes[0] + scCode := wasm.GetSCCode("attestation.wasm") + scAddress, _ := ownerNode.BlockchainHook.NewAddress(ownerNode.OwnAccount.Address, ownerNode.OwnAccount.Nonce, vmFactory.WasmVirtualMachine) - nrRoundsToPropagateMultiShard := int64(10) - for i := int64(0); i <= nrRoundsToPropagateMultiShard; i++ { - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) - } + registerValue := big.NewInt(100) + integrationTests.CreateAndSendTransactionWithGasLimit( + nodes[0], + big.NewInt(0), + 2000000, + make([]byte, 32), + []byte(wasm.CreateDeployTxData(scCode)+"@"+hex.EncodeToString(registerValue.Bytes())+"@"+hex.EncodeToString(relayer.Address)+"@"+"ababab"), + integrationTests.ChainID, + integrationTests.MinTransactionVersion, + ) + time.Sleep(time.Second) - cryptoHook := hooks.NewVMCryptoHook() - privateInfos := make([]string, len(players)) - for i := range players { - privateInfos[i] = core.UniqueIdentifier() - publicInfo, _ := cryptoHook.Keccak256([]byte(privateInfos[i])) - createAndSendSimpleTransaction(nodes, relayer, scAddress, big.NewInt(0), savePublicInfoVMGas, - []byte("savePublicInfo@"+hex.EncodeToString([]byte(uniqueIDs[i]))+"@"+hex.EncodeToString(publicInfo))) - } - time.Sleep(time.Second) + registerVMGas := uint64(10000000) + savePublicInfoVMGas := uint64(10000000) + attestVMGas := uint64(10000000) - nrRoundsToPropagate := int64(5) - for i := int64(0); i <= nrRoundsToPropagate; i++ { round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) - } - for i, player := range players { - _ = CreateAndSendRelayedAndUserTx(nodes, relayer, player, scAddress, big.NewInt(0), attestVMGas, - []byte("attest@"+hex.EncodeToString([]byte(uniqueIDs[i]))+"@"+hex.EncodeToString([]byte(privateInfos[i])))) - _ = CreateAndSendRelayedAndUserTx(nodes, relayer, player, scAddress, registerValue, - registerVMGas, []byte("register@"+hex.EncodeToString([]byte(uniqueIDs[i])))) - } - time.Sleep(time.Second) + integrationTests.MintAllPlayers(nodes, players, registerValue) - nrRoundsToPropagateMultiShard = int64(20) - for i := int64(0); i <= nrRoundsToPropagateMultiShard; i++ { - round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) - integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) - } + uniqueIDs := make([]string, len(players)) + for i, player := range players { + uniqueIDs[i] = 
core.UniqueIdentifier() + _, _ = createAndSendRelayedAndUserTxFunc(nodes, relayer, player, scAddress, registerValue, + registerVMGas, []byte("register@"+hex.EncodeToString([]byte(uniqueIDs[i])))) + } + time.Sleep(time.Second) - for i, player := range players { - checkAttestedPublicKeys(t, ownerNode, scAddress, []byte(uniqueIDs[i]), player.Address) + nrRoundsToPropagateMultiShard := int64(10) + for i := int64(0); i <= nrRoundsToPropagateMultiShard; i++ { + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + } + + cryptoHook := hooks.NewVMCryptoHook() + privateInfos := make([]string, len(players)) + for i := range players { + privateInfos[i] = core.UniqueIdentifier() + publicInfo, _ := cryptoHook.Keccak256([]byte(privateInfos[i])) + createAndSendSimpleTransaction(nodes, relayer, scAddress, big.NewInt(0), savePublicInfoVMGas, + []byte("savePublicInfo@"+hex.EncodeToString([]byte(uniqueIDs[i]))+"@"+hex.EncodeToString(publicInfo))) + } + time.Sleep(time.Second) + + nrRoundsToPropagate := int64(5) + for i := int64(0); i <= nrRoundsToPropagate; i++ { + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + } + + integrationTests.MintAllPlayers(nodes, players, registerValue) + + for i, player := range players { + _, _ = createAndSendRelayedAndUserTxFunc(nodes, relayer, player, scAddress, big.NewInt(0), attestVMGas, + []byte("attest@"+hex.EncodeToString([]byte(uniqueIDs[i]))+"@"+hex.EncodeToString([]byte(privateInfos[i])))) + _, _ = createAndSendRelayedAndUserTxFunc(nodes, relayer, player, scAddress, registerValue, + registerVMGas, []byte("register@"+hex.EncodeToString([]byte(uniqueIDs[i])))) + } + time.Sleep(time.Second) + + nrRoundsToPropagateMultiShard = int64(20) + for i := int64(0); i <= nrRoundsToPropagateMultiShard; i++ { + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + integrationTests.AddSelfNotarizedHeaderByMetachain(nodes) + } + + for i, player := range players { + checkAttestedPublicKeys(t, ownerNode, scAddress, []byte(uniqueIDs[i]), player.Address) + } } } @@ -385,7 +449,7 @@ func checkPlayerBalances( players []*integrationTests.TestWalletAccount) { for _, player := range players { userAcc := GetUserAccount(nodes, player.Address) - assert.Equal(t, userAcc.GetBalance().Cmp(player.Balance), 0) + assert.Equal(t, 0, userAcc.GetBalance().Cmp(player.Balance)) assert.Equal(t, userAcc.GetNonce(), player.Nonce) } } diff --git a/integrationTests/multiShard/smartContract/dns/dns_test.go b/integrationTests/multiShard/smartContract/dns/dns_test.go index 20135a2bda4..1f983617cb1 100644 --- a/integrationTests/multiShard/smartContract/dns/dns_test.go +++ b/integrationTests/multiShard/smartContract/dns/dns_test.go @@ -202,7 +202,7 @@ func sendRegisterUserNameAsRelayedTx( for i, player := range players { userName := generateNewUserName() scAddress := selectDNSAddressFromUserName(sortedDNSAddresses, userName) - _ = relayedTx.CreateAndSendRelayedAndUserTx(nodes, relayer, player, []byte(scAddress), dnsRegisterValue, + _, _ = relayedTx.CreateAndSendRelayedAndUserTx(nodes, relayer, player, []byte(scAddress), dnsRegisterValue, gasLimit, []byte("register@"+hex.EncodeToString([]byte(userName)))) userNames[i] = userName } diff --git a/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go 
b/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go index d01f900d5e2..b74acc3b392 100644 --- a/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go +++ b/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go @@ -29,12 +29,14 @@ func TestBridgeSetupAndBurn(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, FixAsyncCallBackArgsListEnableEpoch: integrationTests.UnreachableEpoch, } arwenVersion := config.WasmVMVersionByEpoch{Version: "v1.4"} - vmConfig := &config.VirtualMachineConfig{WasmVMVersions: []config.WasmVMVersionByEpoch{arwenVersion}} + vmConfig := &config.VirtualMachineConfig{ + WasmVMVersions: []config.WasmVMVersionByEpoch{arwenVersion}, + TransferAndExecuteByUserAddresses: []string{"erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe3"}, + } nodes := integrationTests.CreateNodesWithEnableEpochsAndVmConfig( numOfShards, nodesPerShard, diff --git a/integrationTests/multiShard/smartContract/scCallingSC_test.go b/integrationTests/multiShard/smartContract/scCallingSC_test.go index 329b86de832..52b24371d15 100644 --- a/integrationTests/multiShard/smartContract/scCallingSC_test.go +++ b/integrationTests/multiShard/smartContract/scCallingSC_test.go @@ -800,7 +800,7 @@ func TestSCCallingInCrossShardDelegation(t *testing.T) { // activate the delegation, this involves an async call to validatorSC stakeAllAvailableTxData := "stakeAllAvailable" - integrationTests.CreateAndSendTransaction(shardNode, nodes, big.NewInt(0), delegateSCAddress, stakeAllAvailableTxData, integrationTests.AdditionalGasLimit) + integrationTests.CreateAndSendTransaction(shardNode, nodes, big.NewInt(0), delegateSCAddress, stakeAllAvailableTxData, 2*integrationTests.AdditionalGasLimit) nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, 1, nonce, round, idxProposers) @@ -850,6 +850,7 @@ func TestSCCallingInCrossShardDelegation(t *testing.T) { FuncName: "getUserActiveStake", Arguments: [][]byte{delegateSCOwner}, } + vmOutput4, _, _ := shardNode.SCQueryService.ExecuteQuery(scQuery4) require.NotNil(t, vmOutput4) require.Equal(t, len(vmOutput4.ReturnData), 1) diff --git a/integrationTests/multiShard/softfork/scDeploy_test.go b/integrationTests/multiShard/softfork/scDeploy_test.go index dc735b26abd..8af125f5797 100644 --- a/integrationTests/multiShard/softfork/scDeploy_test.go +++ b/integrationTests/multiShard/softfork/scDeploy_test.go @@ -26,7 +26,6 @@ func TestScDeploy(t *testing.T) { t.Skip("this is not a short test") } - builtinEnableEpoch := uint32(0) deployEnableEpoch := uint32(1) relayedTxEnableEpoch := uint32(0) penalizedTooMuchGasEnableEpoch := uint32(0) @@ -34,11 +33,13 @@ func TestScDeploy(t *testing.T) { scProcessorV2EnableEpoch := integrationTests.UnreachableEpoch enableEpochs := integrationTests.CreateEnableEpochsConfig() - enableEpochs.BuiltInFunctionOnMetaEnableEpoch = builtinEnableEpoch enableEpochs.SCDeployEnableEpoch = deployEnableEpoch enableEpochs.RelayedTransactionsEnableEpoch = relayedTxEnableEpoch enableEpochs.PenalizedTooMuchGasEnableEpoch = penalizedTooMuchGasEnableEpoch enableEpochs.SCProcessorV2EnableEpoch = scProcessorV2EnableEpoch + enableEpochs.StakingV4Step1EnableEpoch = integrationTests.StakingV4Step1EnableEpoch + enableEpochs.StakingV4Step2EnableEpoch = integrationTests.StakingV4Step2EnableEpoch + 
enableEpochs.StakingV4Step3EnableEpoch = integrationTests.StakingV4Step3EnableEpoch shardNode := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, diff --git a/integrationTests/node/getAccount/getAccount_test.go b/integrationTests/node/getAccount/getAccount_test.go index 388ef74c5a3..487c8b1a15a 100644 --- a/integrationTests/node/getAccount/getAccount_test.go +++ b/integrationTests/node/getAccount/getAccount_test.go @@ -31,13 +31,15 @@ func createAccountsRepository(accDB state.AccountsAdapter, blockchain chainData. } func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } trieStorage, _ := integrationTests.CreateTrieStorageManager(testscommon.CreateMemUnit()) accDB, _ := integrationTests.CreateAccountsDB(0, trieStorage) rootHash, _ := accDB.Commit() - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.AddressPubKeyConverterField = integrationTests.TestAddressPubkeyConverter dataComponents := integrationTests.GetDefaultDataComponents() @@ -67,7 +69,9 @@ func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { } func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } testNonce := uint64(7) testBalance := big.NewInt(100) @@ -77,7 +81,7 @@ func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { testPubkey := integrationTests.CreateAccount(accDB, testNonce, testBalance) rootHash, _ := accDB.Commit() - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.AddressPubKeyConverterField = testscommon.RealWorldBech32PubkeyConverter dataComponents := integrationTests.GetDefaultDataComponents() diff --git a/integrationTests/nodesCoordinatorFactory.go b/integrationTests/nodesCoordinatorFactory.go index 08bf6f0f3dd..28267d44c5a 100644 --- a/integrationTests/nodesCoordinatorFactory.go +++ b/integrationTests/nodesCoordinatorFactory.go @@ -11,8 +11,8 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" ) @@ -54,7 +54,12 @@ func (tpn *IndexHashedNodesCoordinatorFactory) CreateNodesCoordinator(arg ArgInd MaxNodesEnableConfig: nil, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } + nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(nodeShufflerArgs) + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + TestMarshalizer, + StakingV4Step2EnableEpoch, + ) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ ShardConsensusGroupSize: arg.shardConsensusGroupSize, MetaConsensusGroupSize: arg.metaConsensusGroupSize, @@ -75,14 +80,15 @@ func (tpn 
*IndexHashedNodesCoordinatorFactory) CreateNodesCoordinator(arg ArgInd IsFullArchive: false, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { - if flag == common.RefactorPeersMiniBlocksFlag { + if flag == common.RefactorPeersMiniBlocksFlag || flag == common.StakingV4Step2Flag { return UnreachableEpoch } return 0 }, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } nodesCoord, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) if err != nil { @@ -114,7 +120,12 @@ func (ihncrf *IndexHashedNodesCoordinatorWithRaterFactory) CreateNodesCoordinato MaxNodesEnableConfig: nil, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } + nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(shufflerArgs) + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + TestMarshalizer, + StakingV4Step2EnableEpoch, + ) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ ShardConsensusGroupSize: arg.shardConsensusGroupSize, MetaConsensusGroupSize: arg.metaConsensusGroupSize, @@ -141,8 +152,9 @@ func (ihncrf *IndexHashedNodesCoordinatorWithRaterFactory) CreateNodesCoordinato return 0 }, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } baseCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/oneNodeNetwork.go b/integrationTests/oneNodeNetwork.go deleted file mode 100644 index 720ff0529c6..00000000000 --- a/integrationTests/oneNodeNetwork.go +++ /dev/null @@ -1,70 +0,0 @@ -package integrationTests - -import ( - "math/big" - "testing" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-go/process" -) - -type oneNodeNetwork struct { - Round uint64 - Nonce uint64 - - Node *TestProcessorNode -} - -// NewOneNodeNetwork creates a one-node network, useful for some integration tests -func NewOneNodeNetwork() *oneNodeNetwork { - n := &oneNodeNetwork{} - - nodes := CreateNodes( - 1, - 1, - 0, - ) - - n.Node = nodes[0] - return n -} - -// Stop stops the test network -func (n *oneNodeNetwork) Stop() { - n.Node.Close() -} - -// Mint mints the given address -func (n *oneNodeNetwork) Mint(address []byte, value *big.Int) { - MintAddress(n.Node.AccntState, address, value) -} - -// GetMinGasPrice returns the min gas price -func (n *oneNodeNetwork) GetMinGasPrice() uint64 { - return n.Node.EconomicsData.GetMinGasPrice() -} - -// MaxGasLimitPerBlock returns the max gas per block -func (n *oneNodeNetwork) MaxGasLimitPerBlock() uint64 { - return n.Node.EconomicsData.MaxGasLimitPerBlock(0) - 1 -} - -// GoToRoundOne advances processing to block and round 1 -func (n *oneNodeNetwork) GoToRoundOne() { - n.Round = IncrementAndPrintRound(n.Round) - n.Nonce++ -} - -// Continue advances processing with a number of rounds -func (n 
*oneNodeNetwork) Continue(t *testing.T, numRounds int) { - n.Nonce, n.Round = WaitOperationToBeDone(t, []*TestProcessorNode{n.Node}, numRounds, n.Nonce, n.Round, []int{0}) -} - -// AddTxToPool adds a transaction to the pool (skips signature checks and interceptors) -func (n *oneNodeNetwork) AddTxToPool(tx *transaction.Transaction) { - txHash, _ := core.CalculateHash(TestMarshalizer, TestHasher, tx) - sourceShard := n.Node.ShardCoordinator.ComputeId(tx.SndAddr) - cacheIdentifier := process.ShardCacherIdentifier(sourceShard, sourceShard) - n.Node.DataPool.Transactions().AddData(txHash, tx, tx.Size(), cacheIdentifier) -} diff --git a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go index c11c73838c5..d74d999779a 100644 --- a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go +++ b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go @@ -31,7 +31,7 @@ func createDefaultConfig() p2pConfig.P2PConfig { Type: "optimized", RefreshIntervalInSec: 1, RoutingTableRefreshIntervalInSec: 1, - ProtocolID: "/erd/kad/1.0.0", + ProtocolIDs: []string{"/erd/kad/1.0.0"}, InitialPeerList: nil, BucketSize: 100, }, @@ -39,6 +39,10 @@ func createDefaultConfig() p2pConfig.P2PConfig { } func TestConnectionsInNetworkShardingWithShardingWithLists(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + p2pCfg := createDefaultConfig() p2pCfg.Sharding = p2pConfig.ShardingConfig{ TargetPeerCount: 12, @@ -54,10 +58,6 @@ func TestConnectionsInNetworkShardingWithShardingWithLists(t *testing.T) { } func testConnectionsInNetworkSharding(t *testing.T, p2pConfig p2pConfig.P2PConfig) { - if testing.Short() { - t.Skip("this is not a short test") - } - nodesPerShard := 8 numMetaNodes := 8 numObserversOnShard := 2 diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go index 6881284899b..f788de20f84 100644 --- a/integrationTests/realcomponents/processorRunner.go +++ b/integrationTests/realcomponents/processorRunner.go @@ -304,6 +304,7 @@ func (pr *ProcessorRunner) createStatusComponents(tb testing.TB) { pr.CoreComponents.NodeTypeProvider(), pr.CoreComponents.EnableEpochsHandler(), pr.DataComponents.Datapool().CurrentEpochValidatorInfo(), + pr.BootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(tb, err) @@ -406,6 +407,7 @@ func (pr *ProcessorRunner) createProcessComponents(tb testing.TB) { argsProcess := factoryProcessing.ProcessComponentsFactoryArgs{ Config: *pr.Config.GeneralConfig, EpochConfig: *pr.Config.EpochConfig, + RoundConfig: *pr.Config.RoundConfig, PrefConfigs: *pr.Config.PreferencesConfig, ImportDBConfig: *pr.Config.ImportDbConfig, FlagsConfig: config.ContextFlagsConfig{ diff --git a/integrationTests/realcomponents/processorRunner_test.go b/integrationTests/realcomponents/processorRunner_test.go index 55951b63831..78d0013597e 100644 --- a/integrationTests/realcomponents/processorRunner_test.go +++ b/integrationTests/realcomponents/processorRunner_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/require" ) func TestNewProcessorRunnerAndClose(t *testing.T) { @@ -11,7 +12,9 @@ func TestNewProcessorRunnerAndClose(t *testing.T) { t.Skip("this is not a short test") } - cfg := testscommon.CreateTestConfigs(t, "../../cmd/node/config") + cfg, err := testscommon.CreateTestConfigs(t.TempDir(), 
"../../cmd/node/config") + require.Nil(t, err) + pr := NewProcessorRunner(t, *cfg) pr.Close(t) } diff --git a/integrationTests/realcomponents/txsimulator/componentConstruction_test.go b/integrationTests/realcomponents/txsimulator/componentConstruction_test.go index 7aa899e5afa..fe162c5a2d5 100644 --- a/integrationTests/realcomponents/txsimulator/componentConstruction_test.go +++ b/integrationTests/realcomponents/txsimulator/componentConstruction_test.go @@ -23,7 +23,9 @@ func TestTransactionSimulationComponentConstructionOnMetachain(t *testing.T) { t.Skip("this is not a short test") } - cfg := testscommon.CreateTestConfigs(t, "../../../cmd/node/config") + cfg, err := testscommon.CreateTestConfigs(t.TempDir(), "../../../cmd/node/config") + require.Nil(t, err) + cfg.EpochConfig.EnableEpochs.ESDTEnableEpoch = 0 cfg.EpochConfig.EnableEpochs.BuiltInFunctionsEnableEpoch = 0 cfg.PreferencesConfig.Preferences.DestinationShardAsObserver = "metachain" // the problem was only on the metachain @@ -72,7 +74,9 @@ func TestTransactionSimulationComponentConstructionOnShard(t *testing.T) { t.Skip("this is not a short test") } - cfg := testscommon.CreateTestConfigs(t, "../../../cmd/node/config") + cfg, err := testscommon.CreateTestConfigs(t.TempDir(), "../../../cmd/node/config") + require.Nil(t, err) + cfg.EpochConfig.EnableEpochs.SCDeployEnableEpoch = 0 cfg.PreferencesConfig.Preferences.DestinationShardAsObserver = "0" cfg.GeneralConfig.VirtualMachine.Execution.WasmVMVersions = []config.WasmVMVersionByEpoch{ @@ -98,7 +102,7 @@ func TestTransactionSimulationComponentConstructionOnShard(t *testing.T) { // deploy the contract txDeploy, hash := pr.CreateDeploySCTx(t, alice, "../testdata/adder/adder.wasm", 3000000, []string{"01"}) - err := pr.ExecuteTransactionAsScheduled(t, txDeploy) + err = pr.ExecuteTransactionAsScheduled(t, txDeploy) require.Nil(t, err) // get the contract address from logs diff --git a/integrationTests/singleShard/smartContract/dns_test.go b/integrationTests/singleShard/smartContract/dns_test.go index 94319e2ef7a..bdfd26da827 100644 --- a/integrationTests/singleShard/smartContract/dns_test.go +++ b/integrationTests/singleShard/smartContract/dns_test.go @@ -13,9 +13,8 @@ import ( ) func TestDNS_Register(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } expectedDNSAddress := []byte{0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 180, 108, 178, 102, 195, 67, 184, 127, 204, 159, 104, 123, 190, 33, 224, 91, 255, 244, 118, 95, 24, 217} diff --git a/integrationTests/state/genesisState/genesisState_test.go b/integrationTests/state/genesisState/genesisState_test.go index 306980f2ce6..811ae1a4901 100644 --- a/integrationTests/state/genesisState/genesisState_test.go +++ b/integrationTests/state/genesisState/genesisState_test.go @@ -70,7 +70,9 @@ func TestCreationOfTheGenesisState(t *testing.T) { } func TestExtensionNodeToBranchEdgeCaseSet1(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } tr1 := integrationTests.CreateNewDefaultTrie() tr2 := integrationTests.CreateNewDefaultTrie() @@ -105,7 +107,9 @@ func TestExtensionNodeToBranchEdgeCaseSet1(t *testing.T) { } func TestExtensionNodeToBranchEdgeCaseSet2(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } tr1 := integrationTests.CreateNewDefaultTrie() tr2 := integrationTests.CreateNewDefaultTrie() diff --git 
a/integrationTests/state/stateExecTransaction/stateExecTransaction_test.go b/integrationTests/state/stateExecTransaction/stateExecTransaction_test.go index c97b9ad52b6..f79e0ff22cc 100644 --- a/integrationTests/state/stateExecTransaction/stateExecTransaction_test.go +++ b/integrationTests/state/stateExecTransaction/stateExecTransaction_test.go @@ -52,7 +52,9 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { } func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } trieStorage, _ := integrationTests.CreateTrieStorageManager(integrationTests.CreateMemUnit()) accnts, _ := integrationTests.CreateAccountsDB(0, trieStorage) @@ -182,7 +184,6 @@ func TestExecTransaction_MoreTransactionsMoreIterationsWithRevertShouldWork(t *t if testing.Short() { t.Skip("this is not a short test") } - t.Parallel() trieStorage, _ := integrationTests.CreateTrieStorageManager(integrationTests.CreateMemUnit()) accnts, _ := integrationTests.CreateAccountsDB(0, trieStorage) diff --git a/integrationTests/state/stateTrie/stateTrie_test.go b/integrationTests/state/stateTrie/stateTrie_test.go index b069f31f5a2..12ec5115d28 100644 --- a/integrationTests/state/stateTrie/stateTrie_test.go +++ b/integrationTests/state/stateTrie/stateTrie_test.go @@ -2,6 +2,7 @@ package stateTrie import ( "bytes" + "context" "encoding/base64" "encoding/binary" "encoding/hex" @@ -24,12 +25,15 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/sha256" crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/mock" + esdtCommon "github.com/multiversx/mx-chain-go/integrationTests/vm/esdt" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/factory" @@ -219,15 +223,15 @@ func TestAccountsDB_CommitTwoOkAccountsShouldWork(t *testing.T) { acc, err := adb.LoadAccount(adr2) require.Nil(t, err) - stateMock := acc.(state.UserAccountHandler) - _ = stateMock.AddToBalance(balance2) + userAccount := acc.(state.UserAccountHandler) + _ = userAccount.AddToBalance(balance2) key := []byte("ABC") val := []byte("123") - _ = stateMock.SaveKeyValue(key, val) + _ = userAccount.SaveKeyValue(key, val) _ = adb.SaveAccount(state1) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) // states are now prepared, committing @@ -242,7 +246,7 @@ func TestAccountsDB_CommitTwoOkAccountsShouldWork(t *testing.T) { // reloading a new trie to test if data is inside rootHash, err = adb.RootHash() require.Nil(t, err) - err = adb.RecreateTrie(rootHash) + err = adb.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash)) require.Nil(t, err) // checking state1 @@ -279,7 +283,7 @@ func TestTrieDB_RecreateFromStorageShouldWork(t *testing.T) { err := tr1.Commit() require.Nil(t, err) - tr2, err 
:= tr1.Recreate(h1) + tr2, err := tr1.Recreate(holders.NewDefaultRootHashesHolder(h1)) require.Nil(t, err) valRecov, _, err := tr2.Get(key) @@ -308,15 +312,15 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te acc, err := adb.LoadAccount(adr2) require.Nil(t, err) - stateMock := acc.(state.UserAccountHandler) - _ = stateMock.AddToBalance(balance2) + userAccount := acc.(state.UserAccountHandler) + _ = userAccount.AddToBalance(balance2) key := []byte("ABC") val := []byte("123") - _ = stateMock.SaveKeyValue(key, val) + _ = userAccount.SaveKeyValue(key, val) _ = adb.SaveAccount(state1) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) // states are now prepared, committing @@ -329,7 +333,7 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te fmt.Printf("data committed! Root: %v\n", base64.StdEncoding.EncodeToString(rootHash)) // reloading a new trie to test if data is inside - err = adb.RecreateTrie(h) + err = adb.RecreateTrie(holders.NewDefaultRootHashesHolder(h)) require.Nil(t, err) // checking state1 @@ -449,9 +453,9 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() require.Nil(t, err) @@ -475,8 +479,8 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { hrWithNonce1 := base64.StdEncoding.EncodeToString(rootHash) fmt.Printf("State root - account with nonce 40: %v\n", hrWithNonce1) - stateMock.(state.UserAccountHandler).IncreaseNonce(50) - _ = adb.SaveAccount(stateMock) + userAccount.(state.UserAccountHandler).IncreaseNonce(50) + _ = adb.SaveAccount(userAccount) rootHash, err = adb.RootHash() require.Nil(t, err) @@ -526,9 +530,9 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -553,8 +557,8 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { hrWithBalance1 := base64.StdEncoding.EncodeToString(rootHash) fmt.Printf("State root - account with balance 40: %v\n", hrWithBalance1) - _ = stateMock.(state.UserAccountHandler).AddToBalance(big.NewInt(50)) - _ = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).AddToBalance(big.NewInt(50)) + _ = adb.SaveAccount(userAccount) rootHash, err = adb.RootHash() require.Nil(t, err) @@ -607,10 +611,10 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - stateMock.(state.UserAccountHandler).SetCode(code) - _ = adb.SaveAccount(stateMock) + userAccount.(state.UserAccountHandler).SetCode(code) + _ = adb.SaveAccount(userAccount) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -682,10 +686,10 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st 
account: %v\n", hrCreated1) fmt.Printf("data root - 1-st account: %v\n", hrRoot1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = stateMock.(state.UserAccountHandler).SaveKeyValue(key, val) - err = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).SaveKeyValue(key, val) + err = adb.SaveAccount(userAccount) require.Nil(t, err) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -761,16 +765,16 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) fmt.Printf("data root - 1-st account: %v\n", hrRoot1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = stateMock.(state.UserAccountHandler).SaveKeyValue(key, val) - err = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).SaveKeyValue(key, val) + err = adb.SaveAccount(userAccount) require.Nil(t, err) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() require.Nil(t, err) hrCreated2 := base64.StdEncoding.EncodeToString(rootHash) - rootHash, err = stateMock.(state.UserAccountHandler).DataTrie().RootHash() + rootHash, err = userAccount.(state.UserAccountHandler).DataTrie().RootHash() require.Nil(t, err) hrRoot2 := base64.StdEncoding.EncodeToString(rootHash) @@ -792,15 +796,15 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test // Step 4. 2-nd account changes its data snapshotMod := adb.JournalLen() - stateMock, err = adb.LoadAccount(adr2) + userAccount, err = adb.LoadAccount(adr2) require.Nil(t, err) - _ = stateMock.(state.UserAccountHandler).SaveKeyValue(key, newVal) - err = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).SaveKeyValue(key, newVal) + err = adb.SaveAccount(userAccount) require.Nil(t, err) rootHash, err = adb.RootHash() require.Nil(t, err) hrCreated2p1 := base64.StdEncoding.EncodeToString(rootHash) - rootHash, err = stateMock.(state.UserAccountHandler).DataTrie().RootHash() + rootHash, err = userAccount.(state.UserAccountHandler).DataTrie().RootHash() require.Nil(t, err) hrRoot2p1 := base64.StdEncoding.EncodeToString(rootHash) @@ -820,9 +824,9 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test require.Nil(t, err) hrCreated2Rev := base64.StdEncoding.EncodeToString(rootHash) - stateMock, err = adb.LoadAccount(adr2) + userAccount, err = adb.LoadAccount(adr2) require.Nil(t, err) - rootHash, err = stateMock.(state.UserAccountHandler).DataTrie().RootHash() + rootHash, err = userAccount.(state.UserAccountHandler).DataTrie().RootHash() require.Nil(t, err) hrRoot2Rev := base64.StdEncoding.EncodeToString(rootHash) fmt.Printf("State root - reverted 2-nd account: %v\n", hrCreated2Rev) @@ -1029,7 +1033,7 @@ func BenchmarkCreateOneMillionAccounts(b *testing.B) { rootHash, err := adb.RootHash() require.Nil(b, err) - _ = adb.RecreateTrie(rootHash) + _ = adb.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash)) fmt.Println("Completely collapsed trie") createAndExecTxs(b, addr, nrTxs, nrOfAccounts, txVal, adb) } @@ -1168,7 +1172,7 @@ func TestTrieDbPruning_GetAccountAfterPruning(t *testing.T) { rootHash2, _ := adb.Commit() adb.PruneTrie(rootHash1, state.OldRoot, state.NewPruningHandler(state.EnableDataRemoval)) - err := adb.RecreateTrie(rootHash2) + err := adb.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash2)) require.Nil(t, err) acc, err := 
adb.GetExistingAccount(address1) require.NotNil(t, acc) @@ -1215,7 +1219,7 @@ func TestAccountsDB_RecreateTrieInvalidatesDataTriesCache(t *testing.T) { err = adb.RevertToSnapshot(0) require.Nil(t, err) - err = adb.RecreateTrie(rootHash) + err = adb.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash)) require.Nil(t, err) acc1, _ = adb.LoadAccount(address1) state1 = acc1.(state.UserAccountHandler) @@ -1245,35 +1249,35 @@ func TestTrieDbPruning_GetDataTrieTrackerAfterPruning(t *testing.T) { _ = adb.SaveAccount(state1) acc2, _ := adb.LoadAccount(address2) - stateMock := acc2.(state.UserAccountHandler) - _ = stateMock.SaveKeyValue(key1, value1) - _ = stateMock.SaveKeyValue(key2, value1) - _ = adb.SaveAccount(stateMock) + userAccount := acc2.(state.UserAccountHandler) + _ = userAccount.SaveKeyValue(key1, value1) + _ = userAccount.SaveKeyValue(key2, value1) + _ = adb.SaveAccount(userAccount) oldRootHash, _ := adb.Commit() acc2, _ = adb.LoadAccount(address2) - stateMock = acc2.(state.UserAccountHandler) - _ = stateMock.SaveKeyValue(key1, value2) - _ = adb.SaveAccount(stateMock) + userAccount = acc2.(state.UserAccountHandler) + _ = userAccount.SaveKeyValue(key1, value2) + _ = adb.SaveAccount(userAccount) newRootHash, _ := adb.Commit() adb.PruneTrie(oldRootHash, state.OldRoot, state.NewPruningHandler(state.EnableDataRemoval)) - err := adb.RecreateTrie(newRootHash) + err := adb.RecreateTrie(holders.NewDefaultRootHashesHolder(newRootHash)) require.Nil(t, err) acc, err := adb.GetExistingAccount(address1) require.NotNil(t, acc) require.Nil(t, err) collapseTrie(state1, t) - collapseTrie(stateMock, t) + collapseTrie(userAccount, t) val, _, err := state1.RetrieveValue(key1) require.Nil(t, err) require.Equal(t, value1, val) - val, _, err = stateMock.RetrieveValue(key2) + val, _, err = userAccount.RetrieveValue(key2) require.Nil(t, err) require.Equal(t, value1, val) } @@ -1281,7 +1285,7 @@ func TestTrieDbPruning_GetDataTrieTrackerAfterPruning(t *testing.T) { func collapseTrie(state state.UserAccountHandler, t *testing.T) { stateRootHash := state.GetRootHash() stateTrie := state.DataTrie().(common.Trie) - stateNewTrie, _ := stateTrie.Recreate(stateRootHash) + stateNewTrie, _ := stateTrie.Recreate(holders.NewDefaultRootHashesHolder(stateRootHash)) require.NotNil(t, stateNewTrie) state.SetDataTrie(stateNewTrie) @@ -1374,7 +1378,7 @@ func TestRollbackBlockAndCheckThatPruningIsCancelledOnAccountsTrie(t *testing.T) if !bytes.Equal(rootHash, rootHashOfRollbackedBlock) { time.Sleep(time.Second * 6) - err = shardNode.AccntState.RecreateTrie(rootHashOfRollbackedBlock) + err = shardNode.AccntState.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHashOfRollbackedBlock)) require.True(t, strings.Contains(err.Error(), trie.ErrKeyNotFound.Error())) } @@ -1392,7 +1396,7 @@ func TestRollbackBlockAndCheckThatPruningIsCancelledOnAccountsTrie(t *testing.T) ) time.Sleep(time.Second * 5) - err = shardNode.AccntState.RecreateTrie(rootHashOfFirstBlock) + err = shardNode.AccntState.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHashOfFirstBlock)) require.Nil(t, err) require.Equal(t, uint64(11), nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce()) require.Equal(t, uint64(12), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce()) @@ -1455,7 +1459,7 @@ func TestRollbackBlockWithSameRootHashAsPreviousAndCheckThatPruningIsNotDone(t * require.Equal(t, uint64(1), nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce()) require.Equal(t, uint64(2), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce()) - err := 
shardNode.AccntState.RecreateTrie(rootHashOfFirstBlock) + err := shardNode.AccntState.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHashOfFirstBlock)) require.Nil(t, err) } @@ -1535,7 +1539,7 @@ func TestTriePruningWhenBlockIsFinal(t *testing.T) { require.Equal(t, uint64(17), nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce()) require.Equal(t, uint64(17), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce()) - err := shardNode.AccntState.RecreateTrie(rootHashOfFirstBlock) + err := shardNode.AccntState.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHashOfFirstBlock)) require.True(t, strings.Contains(err.Error(), trie.ErrKeyNotFound.Error())) } @@ -1683,12 +1687,12 @@ func checkTrieCanBeRecreated(tb testing.TB, node *integrationTests.TestProcessor stateTrie := node.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) roothash := node.BlockChain.GetCurrentBlockRootHash() - tr, err := stateTrie.Recreate(roothash) + tr, err := stateTrie.Recreate(holders.NewDefaultRootHashesHolder(roothash)) require.Nil(tb, err) require.NotNil(tb, tr) _, _, finalRoothash := node.BlockChain.GetFinalBlockInfo() - tr, err = stateTrie.Recreate(finalRoothash) + tr, err = stateTrie.Recreate(holders.NewDefaultRootHashesHolder(finalRoothash)) require.Nil(tb, err) require.NotNil(tb, tr) @@ -1700,7 +1704,7 @@ func checkTrieCanBeRecreated(tb testing.TB, node *integrationTests.TestProcessor err = integrationTests.TestMarshalizer.Unmarshal(hdr, hdrBytes) require.Nil(tb, err) - tr, err = stateTrie.Recreate(hdr.GetRootHash()) + tr, err = stateTrie.Recreate(holders.NewDefaultRootHashesHolder(hdr.GetRootHash())) require.Nil(tb, err) require.NotNil(tb, tr) } @@ -1851,7 +1855,7 @@ func testNodeStateSnapshotAndPruning( stateTrie := node.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) assert.Equal(t, 1, len(snapshotsRootHashes)) for i := range snapshotsRootHashes { - tr, err := stateTrie.Recreate(snapshotsRootHashes[i]) + tr, err := stateTrie.Recreate(holders.NewDefaultRootHashesHolder(snapshotsRootHashes[i])) require.Nil(t, err) require.NotNil(t, tr) } @@ -1859,7 +1863,7 @@ func testNodeStateSnapshotAndPruning( assert.Equal(t, 1, len(prunedRootHashes)) // if pruning is called for a root hash in a different epoch than the commit, then recreate trie should work for i := 0; i < len(prunedRootHashes)-1; i++ { - tr, err := stateTrie.Recreate(prunedRootHashes[i]) + tr, err := stateTrie.Recreate(holders.NewDefaultRootHashesHolder(prunedRootHashes[i])) require.Nil(t, tr) require.NotNil(t, err) } @@ -2171,10 +2175,10 @@ func checkDataTrieConsistency( for i, rootHash := range dataTriesRootHashes { _, ok := removedAccounts[i] if ok { - err := adb.RecreateTrie(rootHash) + err := adb.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash)) assert.NotNil(t, err) } else { - err := adb.RecreateTrie(rootHash) + err := adb.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash)) require.Nil(t, err) } } @@ -2337,6 +2341,224 @@ func Test_SnapshotStateRemovesLastSnapshotStartedAfterSnapshotFinished(t *testin assert.NotNil(t, err) } +func TestMigrateDataTrieBuiltinFunc(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("migrate shard 0 system account", func(t *testing.T) { + shardId := byte(0) + nodes, idxProposers, nonce, round := startNodesAndIssueToken(t, 2, shardId) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + valuesBeforeMigration := getValuesFromAccount(t, nodes[shardId].AccntState, core.SystemAccountAddress) + 
migrateDataTrieBuiltInFunc(t, nodes, shardId, core.SystemAccountAddress, nonce, round, idxProposers) + valuesAfterMigration := getValuesFromAccount(t, nodes[shardId].AccntState, core.SystemAccountAddress) + + require.Equal(t, len(valuesBeforeMigration), len(valuesAfterMigration)) + require.True(t, len(valuesAfterMigration) > 0) + for i := range valuesBeforeMigration { + require.Equal(t, valuesBeforeMigration[i], valuesAfterMigration[i]) + } + }) + t.Run("migrate shard 0 user account", func(t *testing.T) { + shardId := byte(0) + nodes, idxProposers, nonce, round := startNodesAndIssueToken(t, 2, shardId) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + migrationAddress := nodes[shardId].OwnAccount.Address + valuesBeforeMigration := getValuesFromAccount(t, nodes[shardId].AccntState, migrationAddress) + migrateDataTrieBuiltInFunc(t, nodes, shardId, migrationAddress, nonce, round, idxProposers) + valuesAfterMigration := getValuesFromAccount(t, nodes[shardId].AccntState, migrationAddress) + + require.Equal(t, len(valuesBeforeMigration), len(valuesAfterMigration)) + require.True(t, len(valuesAfterMigration) > 0) + for i := range valuesBeforeMigration { + require.Equal(t, valuesBeforeMigration[i], valuesAfterMigration[i]) + } + }) + t.Run("migrate shard 1 system account", func(t *testing.T) { + shardId := byte(1) + nodes, idxProposers, nonce, round := startNodesAndIssueToken(t, 2, shardId) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + valuesBeforeMigration := getValuesFromAccount(t, nodes[shardId].AccntState, core.SystemAccountAddress) + migrateDataTrieBuiltInFunc(t, nodes, shardId, core.SystemAccountAddress, nonce, round, idxProposers) + valuesAfterMigration := getValuesFromAccount(t, nodes[shardId].AccntState, core.SystemAccountAddress) + + require.Equal(t, len(valuesBeforeMigration), len(valuesAfterMigration)) + require.True(t, len(valuesAfterMigration) > 0) + for i := range valuesBeforeMigration { + require.Equal(t, valuesBeforeMigration[i], valuesAfterMigration[i]) + } + }) + t.Run("migrate shard 1 user account", func(t *testing.T) { + shardId := byte(1) + nodes, idxProposers, nonce, round := startNodesAndIssueToken(t, 2, shardId) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + migrationAddress := nodes[shardId].OwnAccount.Address + valuesBeforeMigration := getValuesFromAccount(t, nodes[shardId].AccntState, migrationAddress) + migrateDataTrieBuiltInFunc(t, nodes, shardId, nodes[shardId].OwnAccount.Address, nonce, round, idxProposers) + valuesAfterMigration := getValuesFromAccount(t, nodes[shardId].AccntState, migrationAddress) + + require.Equal(t, len(valuesBeforeMigration), len(valuesAfterMigration)) + require.True(t, len(valuesAfterMigration) > 0) + for i := range valuesBeforeMigration { + require.Equal(t, valuesBeforeMigration[i], valuesAfterMigration[i]) + } + }) +} + +func getValuesFromAccount(t *testing.T, adb state.AccountsAdapter, address []byte) [][]byte { + account, err := adb.GetExistingAccount(address) + require.Nil(t, err) + + chLeaves := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), + ErrChan: errChan.NewErrChanWrapper(), + } + err = account.(state.UserAccountHandler).GetAllLeaves(chLeaves, context.Background()) + require.Nil(t, err) + + values := make([][]byte, 0) + for leaf := range chLeaves.LeavesChan { + values = append(values, leaf.Value()) + } + + err = chLeaves.ErrChan.ReadFromChanNonBlocking() + require.Nil(t, err) + + return values 
+} + +func migrateDataTrieBuiltInFunc( + t *testing.T, + nodes []*integrationTests.TestProcessorNode, + shardId byte, + migrationAddress []byte, + nonce uint64, + round uint64, + idxProposers []int, +) { + require.True(t, nodes[shardId].EnableEpochsHandler.IsFlagEnabled(common.AutoBalanceDataTriesFlag)) + isMigrated := getAddressMigrationStatus(t, nodes[shardId].AccntState, migrationAddress) + require.False(t, isMigrated) + + integrationTests.CreateAndSendTransactionWithSenderAccount(nodes[shardId], nodes, big.NewInt(0), nodes[shardId].OwnAccount, getDestAccountAddress(migrationAddress, shardId), core.BuiltInFunctionMigrateDataTrie, 1000000) + + time.Sleep(time.Second) + nrRoundsToPropagate := 5 + _, _ = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagate, nonce, round, idxProposers) + + isMigrated = getAddressMigrationStatus(t, nodes[shardId].AccntState, migrationAddress) + require.True(t, isMigrated) +} + +func startNodesAndIssueToken( + t *testing.T, + numOfShards int, + issuerShardId byte, +) ([]*integrationTests.TestProcessorNode, []int, uint64, uint64) { + nodesPerShard := 1 + numMetachainNodes := 1 + + enableEpochs := config.EnableEpochs{ + GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, + OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV2EnableEpoch: integrationTests.UnreachableEpoch, + StakeLimitsEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, + AutoBalanceDataTriesEnableEpoch: 1, + } + nodes := integrationTests.CreateNodesWithEnableEpochs( + numOfShards, + nodesPerShard, + numMetachainNodes, + enableEpochs, + ) + + roundsPerEpoch := uint64(5) + for _, node := range nodes { + node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) + } + + idxProposers := make([]int, numOfShards+1) + for i := 0; i < numOfShards; i++ { + idxProposers[i] = i * nodesPerShard + } + idxProposers[numOfShards] = numOfShards * nodesPerShard + + integrationTests.DisplayAndStartNodes(nodes) + + initialVal := int64(10000000000) + integrationTests.MintAllNodes(nodes, big.NewInt(initialVal)) + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + // send token issue + initialSupply := int64(10000000000) + ticker := "TCK" + esdtCommon.IssueTestTokenWithIssuerAccount(nodes, nodes[issuerShardId].OwnAccount, initialSupply, ticker) + + time.Sleep(time.Second) + nrRoundsToPropagate := 8 + nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagate, nonce, round, idxProposers) + time.Sleep(time.Second) + + tokenIdentifier := string(integrationTests.GetTokenIdentifier(nodes, []byte(ticker))) + + esdtCommon.CheckAddressHasTokens(t, nodes[issuerShardId].OwnAccount.Address, nodes, []byte(tokenIdentifier), 0, initialSupply) + + return nodes, idxProposers, nonce, round +} + +func getDestAccountAddress(migrationAddress []byte, shardId byte) []byte { + if bytes.Equal(migrationAddress, core.SystemAccountAddress) && shardId == 0 { + systemAccountAddress := bytes.Repeat([]byte{255}, 30) + systemAccountAddress = append(systemAccountAddress, []byte{0, 0}...) 
+ return systemAccountAddress + } + + return migrationAddress +} + +func getAddressMigrationStatus(t *testing.T, adb state.AccountsAdapter, address []byte) bool { + account, err := adb.LoadAccount(address) + require.Nil(t, err) + + userAccount, ok := account.(state.UserAccountHandler) + require.True(t, ok) + + isMigrated, err := userAccount.DataTrie().IsMigratedToLatestVersion() + require.Nil(t, err) + + return isMigrated +} + func addDataTriesForAccountsStartingWithIndex( startIndex uint32, nbAccounts uint32, diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 8bfbd584a70..74650d4ce11 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/throttler" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/integrationTests" @@ -59,6 +60,10 @@ func createTestProcessorNodeAndTrieStorage( } func TestNode_RequestInterceptTrieNodesWithMessenger(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + t.Run("test with double lists version", func(t *testing.T) { testNodeRequestInterceptTrieNodesWithMessenger(t, 2) }) @@ -135,7 +140,7 @@ func testNodeRequestInterceptTrieNodesWithMessenger(t *testing.T, version int) { assert.Nil(t, err) cancel() - requesterTrie, err = requesterTrie.Recreate(rootHash) + requesterTrie, err = requesterTrie.Recreate(holders.NewDefaultRootHashesHolder(rootHash)) require.Nil(t, err) newRootHash, _ := requesterTrie.RootHash() @@ -180,6 +185,10 @@ func printStatistics(ctx context.Context, stats common.SizeSyncStatisticsHandler } func TestNode_RequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + t.Run("test with double lists version", func(t *testing.T) { testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t, 2) }) @@ -351,7 +360,7 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves err = userAccSyncer.SyncAccounts(rootHash, storageMarker.NewDisabledStorageMarker()) assert.Nil(t, err) - _ = nRequester.AccntState.RecreateTrie(rootHash) + _ = nRequester.AccntState.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash)) newRootHash, _ := nRequester.AccntState.RootHash() assert.NotEqual(t, nilRootHash, newRootHash) @@ -501,7 +510,7 @@ func testSyncMissingSnapshotNodes(t *testing.T, version int) { for sw.IsSnapshotInProgress() { time.Sleep(time.Millisecond * 100) } - _ = nRequester.AccntState.RecreateTrie(rootHash) + _ = nRequester.AccntState.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash)) newRootHash, _ := nRequester.AccntState.RootHash() assert.NotEqual(t, nilRootHash, newRootHash) @@ -537,7 +546,7 @@ func copyPartialState(t *testing.T, sourceNode, destinationNode *integrationTest func getDataTriesHashes(t *testing.T, tr common.Trie, dataTriesRootHashes [][]byte) [][]byte { hashes := make([][]byte, 0) for _, rh := range dataTriesRootHashes { - dt, err := tr.Recreate(rh) + dt, err := 
tr.Recreate(holders.NewDefaultRootHashesHolder(rh)) assert.Nil(t, err) dtHashes, err := dt.GetAllHashes() diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index 1d9b2d505b0..5f5987b11cf 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -43,6 +43,7 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" testFactory "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -234,7 +235,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { tcn.initAccountsDB() - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.SyncTimerField = syncer coreComponents.RoundHandlerField = roundHandler coreComponents.InternalMarshalizerField = TestMarshalizer @@ -244,7 +245,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { return string(ChainID) } coreComponents.GenesisTimeField = time.Unix(args.StartTime, 0) - coreComponents.GenesisNodesSetupField = &testscommon.NodesSetupStub{ + coreComponents.GenesisNodesSetupField = &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return uint32(args.ConsensusSize) }, @@ -320,6 +321,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { processComponents.RoundHandlerField = roundHandler processComponents.ScheduledTxsExecutionHandlerInternal = &testscommon.ScheduledTxsExecutionStub{} processComponents.ProcessedMiniBlocksTrackerInternal = &testscommon.ProcessedMiniBlocksTrackerStub{} + processComponents.SentSignaturesTrackerInternal = &testscommon.SentSignatureTrackerStub{} dataComponents := GetDefaultDataComponents() dataComponents.BlockChain = tcn.ChainHandler @@ -366,26 +368,27 @@ func (tcn *TestConsensusNode) initNodesCoordinator( cache storage.Cacher, ) { argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusSize, - MetaConsensusGroupSize: consensusSize, - Marshalizer: TestMarshalizer, - Hasher: hasher, - Shuffler: &shardingMocks.NodeShufflerMock{}, - EpochStartNotifier: epochStartRegistrationHandler, - BootStorer: CreateMemUnit(), - NbShards: maxShards, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: pkBytes, - ConsensusGroupCache: cache, - ShuffledOutHandler: &chainShardingMocks.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - ShardIDAsObserver: tcn.ShardCoordinator.SelfId(), - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: consensusSize, + MetaConsensusGroupSize: consensusSize, + Marshalizer: TestMarshalizer, + Hasher: hasher, + Shuffler: &shardingMocks.NodeShufflerMock{}, + EpochStartNotifier: epochStartRegistrationHandler, + BootStorer: CreateMemUnit(), + NbShards: maxShards, + 
EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: pkBytes, + ConsensusGroupCache: cache, + ShuffledOutHandler: &chainShardingMocks.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardIDAsObserver: tcn.ShardCoordinator.SelfId(), + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, } tcn.NodesCoordinator, _ = nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index 25ab4a21e6e..43b2ac576a0 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -29,7 +29,6 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/factory/resolverscontainer" "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers" "github.com/multiversx/mx-chain-go/epochStart/notifier" - "github.com/multiversx/mx-chain-go/heartbeat/monitor" "github.com/multiversx/mx-chain-go/heartbeat/processor" "github.com/multiversx/mx-chain-go/heartbeat/sender" "github.com/multiversx/mx-chain-go/integrationTests/mock" @@ -52,8 +51,10 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" @@ -349,27 +350,28 @@ func CreateNodesWithTestHeartbeatNode( suCache, _ := storageunit.NewCache(cacherCfg) for shardId, validatorList := range validatorsMap { argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - ShardIDAsObserver: shardId, - NbShards: uint32(numShards), - EligibleNodes: validatorsForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: suCache, - Shuffler: &shardingMocks.NodeShufflerMock{}, - BootStorer: CreateMemUnit(), - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - Epoch: 0, - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - 
GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: uint32(numShards), + EligibleNodes: validatorsForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: suCache, + Shuffler: &shardingMocks.NodeShufflerMock{}, + BootStorer: CreateMemUnit(), + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + Epoch: 0, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) @@ -396,27 +398,28 @@ func CreateNodesWithTestHeartbeatNode( } argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - ShardIDAsObserver: shardId, - NbShards: uint32(numShards), - EligibleNodes: validatorsForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: suCache, - Shuffler: &shardingMocks.NodeShufflerMock{}, - BootStorer: CreateMemUnit(), - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - Epoch: 0, - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: uint32(numShards), + EligibleNodes: validatorsForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: suCache, + Shuffler: &shardingMocks.NodeShufflerMock{}, + BootStorer: CreateMemUnit(), + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + Epoch: 0, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) @@ -447,7 +450,6 @@ func (thn *TestHeartbeatNode) 
InitTestHeartbeatNode(tb testing.TB, minPeersWaiti thn.initResolversAndRequesters() thn.initInterceptors() thn.initShardSender(tb) - thn.initCrossShardPeerTopicNotifier(tb) thn.initDirectConnectionProcessor(tb) for len(thn.MainMessenger.Peers()) < minPeersWaiting { @@ -527,13 +529,14 @@ func (thn *TestHeartbeatNode) initResolversAndRequesters() { return &trieMock.TrieStub{} }, }, - SizeCheckDelta: 100, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - NumConcurrentResolvingJobs: 10, - MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: payloadValidator, + SizeCheckDelta: 100, + InputAntifloodHandler: &mock.NilAntifloodHandler{}, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: payloadValidator, } requestersContainerFactoryArgs := requesterscontainer.FactoryArgs{ @@ -624,6 +627,7 @@ func (thn *TestHeartbeatNode) initInterceptors() { SignaturesHandler: &processMock.SignaturesHandlerStub{}, HeartbeatExpiryTimespanInSec: thn.heartbeatExpiryTimespanInSec, PeerID: thn.MainMessenger.ID(), + RelayedTxV3Processor: &processMocks.RelayedTxV3ProcessorMock{}, } thn.createPeerAuthInterceptor(argsFactory) @@ -793,29 +797,6 @@ func (thn *TestHeartbeatNode) initDirectConnectionProcessor(tb testing.TB) { require.Nil(tb, err) } -func (thn *TestHeartbeatNode) initCrossShardPeerTopicNotifier(tb testing.TB) { - argsCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: thn.ShardCoordinator, - PeerShardMapper: thn.MainPeerShardMapper, - } - crossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) - require.Nil(tb, err) - - err = thn.MainMessenger.AddPeerTopicNotifier(crossShardPeerTopicNotifier) - require.Nil(tb, err) - - argsCrossShardPeerTopicNotifier = monitor.ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: thn.ShardCoordinator, - PeerShardMapper: thn.FullArchivePeerShardMapper, - } - fullArchiveCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) - require.Nil(tb, err) - - err = thn.FullArchiveMessenger.AddPeerTopicNotifier(fullArchiveCrossShardPeerTopicNotifier) - require.Nil(tb, err) - -} - // ConnectOnMain will try to initiate a connection to the provided parameter on the main messenger func (thn *TestHeartbeatNode) ConnectOnMain(connectable Connectable) error { if check.IfNil(connectable) { @@ -861,13 +842,19 @@ func MakeDisplayTableForHeartbeatNodes(nodes map[uint32][]*TestHeartbeatNode) st for _, n := range nodesList { buffPk, _ := n.NodeKeys.MainKey.Pk.ToByteArray() + validatorMarker := "" + v, _, _ := n.NodesCoordinator.GetValidatorWithPublicKey(buffPk) + if v != nil { + validatorMarker = "*" + } + peerInfo := n.MainMessenger.GetConnectedPeersInfo() pid := n.MainMessenger.ID().Pretty() lineData := display.NewLineData( false, []string{ - core.GetTrimmedPk(hex.EncodeToString(buffPk)), + core.GetTrimmedPk(hex.EncodeToString(buffPk)) + validatorMarker, pid[len(pid)-6:], fmt.Sprintf("%d", shardId), fmt.Sprintf("%d", n.CountGlobalMessages()), diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 2e3a2007cbe..bcd5419cfa4 100644 --- 
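A recurring addition in the coordinator setups above is the new `NodesCoordinatorRegistryFactory` field of `ArgNodesCoordinator`; the mocked helpers fill it with `&shardingMocks.NodesCoordinatorRegistryFactoryMock{}`, while the non-mock path builds it from a marshalizer and the staking v4 step 2 activation epoch. A hedged sketch of that constructor call, with the package path and parameter types assumed from the selectors used in these hunks:

```go
package example

import (
	"fmt"

	"github.com/multiversx/mx-chain-core-go/marshal"
	"github.com/multiversx/mx-chain-go/sharding/nodesCoordinator"
)

// buildRegistryFactory shows the two inputs the test helpers above pass to the
// constructor: a marshalizer and the staking v4 step 2 activation epoch.
func buildRegistryFactory(marshalizer marshal.Marshalizer, stakingV4Step2Epoch uint32) error {
	registryFactory, err := nodesCoordinator.NewNodesCoordinatorRegistryFactory(marshalizer, stakingV4Step2Epoch)
	if err != nil {
		return err
	}

	fmt.Printf("registry factory ready: %T\n", registryFactory)
	return nil
}
```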
a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -69,6 +69,8 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" testStorage "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" testcommonStorage "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -109,7 +111,6 @@ const ( adaptivity = false hysteresis = float32(0.2) maxTrieLevelInMemory = uint(5) - delegationManagementKey = "delegationManagement" delegationContractsList = "delegationContracts" ) @@ -153,7 +154,7 @@ func createP2PConfig(initialPeerList []string) p2pConfig.P2PConfig { Enabled: true, Type: "optimized", RefreshIntervalInSec: 2, - ProtocolID: "/erd/kad/1.0.0", + ProtocolIDs: []string{"/erd/kad/1.0.0"}, InitialPeerList: initialPeerList, BucketSize: 100, RoutingTableRefreshIntervalInSec: 100, @@ -647,7 +648,7 @@ func CreateFullGenesisBlocks( gasSchedule := wasmConfig.MakeGasMapForTests() defaults.FillGasMapInternal(gasSchedule, 1) - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(enableEpochsConfig) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.TxMarshalizerField = TestTxSignMarshalizer coreComponents.HasherField = TestHasher @@ -665,8 +666,6 @@ func CreateFullGenesisBlocks( dataComponents.DataPool = dataPool dataComponents.BlockChain = blkc - roundsConfig := GetDefaultRoundsConfig() - argsGenesis := genesisProcess.ArgsGenesisBlockCreator{ Core: coreComponents, Data: dataComponents, @@ -683,6 +682,7 @@ func CreateFullGenesisBlocks( WasmVMVersions: []config.WasmVMVersionByEpoch{ {StartEpoch: 0, Version: "*"}, }, + TransferAndExecuteByUserAddresses: []string{"erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c"}, }, TrieStorageManagers: trieStorageManagers, SystemSCConfig: config.SystemSmartContractsConfig{ @@ -716,6 +716,8 @@ func CreateFullGenesisBlocks( MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -726,14 +728,21 @@ func CreateFullGenesisBlocks( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, AccountsParser: accountsParser, SmartContractParser: smartContractParser, BlockSignKeyGen: &mock.KeyGenMock{}, - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: enableEpochsConfig, }, - RoundConfig: &roundsConfig, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, } @@ -764,7 +773,7 @@ func CreateGenesisMetaBlock( gasSchedule := wasmConfig.MakeGasMapForTests() defaults.FillGasMapInternal(gasSchedule, 1) - coreComponents := GetDefaultCoreComponents() + 
coreComponents := GetDefaultCoreComponents(enableEpochsConfig) coreComponents.InternalMarshalizerField = marshalizer coreComponents.HasherField = hasher coreComponents.Uint64ByteSliceConverterField = uint64Converter @@ -791,6 +800,7 @@ func CreateGenesisMetaBlock( WasmVMVersions: []config.WasmVMVersionByEpoch{ {StartEpoch: 0, Version: "*"}, }, + TransferAndExecuteByUserAddresses: []string{"erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe3"}, }, HardForkConfig: config.HardforkConfig{}, SystemSCConfig: config.SystemSmartContractsConfig{ @@ -824,6 +834,8 @@ func CreateGenesisMetaBlock( MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -834,12 +846,20 @@ func CreateGenesisMetaBlock( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, BlockSignKeyGen: &mock.KeyGenMock{}, GenesisNodePrice: big.NewInt(1000), - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: enableEpochsConfig, }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, } @@ -1038,15 +1058,17 @@ func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionPr return fee }, }, - ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, - BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, - ArgsParser: smartContract.NewArgumentParser(), - ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, - EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, - TxLogsProcessor: &mock.TxLogsProcessorStub{}, + ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, + BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, + ArgsParser: smartContract.NewArgumentParser(), + ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, + EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + TxLogsProcessor: &mock.TxLogsProcessorStub{}, + RelayedTxV3Processor: &processMocks.RelayedTxV3ProcessorMock{}, + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } txProcessor, _ := txProc.NewTxProcessor(argsNewTxProcessor) @@ -1381,7 +1403,7 @@ func CreateNodesWithEnableEpochsAndVmConfig( nodesPerShard, numMetaChainNodes, epochConfig, - GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, ) } @@ -1521,6 +1543,9 @@ func CreateNodesWithFullGenesis( ) ([]*TestProcessorNode, *TestProcessorNode) { enableEpochsConfig := GetDefaultEnableEpochsConfig() enableEpochsConfig.StakingV2EnableEpoch = UnreachableEpoch + enableEpochsConfig.StakingV4Step1EnableEpoch = UnreachableEpoch + enableEpochsConfig.StakingV4Step2EnableEpoch = UnreachableEpoch + enableEpochsConfig.StakingV4Step3EnableEpoch = UnreachableEpoch 
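The epoch tweaks above follow the suite's convention of disabling a feature by pushing its activation epoch out of reach. A small sketch of that idea for the staking v4 steps, with field names taken from the `config.EnableEpochs` literal used earlier in this diff:

```go
package example

import "github.com/multiversx/mx-chain-go/config"

// unreachableEpoch mirrors the UnreachableEpoch constant these tests use to keep
// a flag permanently inactive for the duration of a scenario.
const unreachableEpoch = uint32(1000000)

// disableStakingV4 pushes all three staking v4 activation steps beyond any epoch
// the scenario will reach, so the legacy staking flow stays active throughout.
func disableStakingV4(enableEpochs *config.EnableEpochs) {
	enableEpochs.StakingV4Step1EnableEpoch = unreachableEpoch
	enableEpochs.StakingV4Step2EnableEpoch = unreachableEpoch
	enableEpochs.StakingV4Step3EnableEpoch = unreachableEpoch
}
```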
return CreateNodesWithFullGenesisCustomEnableEpochs(numOfShards, nodesPerShard, numMetaChainNodes, genesisFile, enableEpochsConfig) } @@ -2168,7 +2193,7 @@ func generateValidTx( _ = accnts.SaveAccount(acc) _, _ = accnts.Commit() - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.TxMarshalizerField = TestTxSignMarshalizer coreComponents.VmMarshalizerField = TestMarshalizer @@ -2609,18 +2634,7 @@ func SaveDelegationManagerConfig(nodes []*TestProcessorNode) { continue } - acc, _ := n.AccntState.LoadAccount(vm.DelegationManagerSCAddress) - userAcc, _ := acc.(state.UserAccountHandler) - - managementData := &systemSmartContracts.DelegationManagement{ - MinDeposit: big.NewInt(100), - LastAddress: vm.FirstDelegationSCAddress, - MinDelegationAmount: big.NewInt(1), - } - marshaledData, _ := TestMarshalizer.Marshal(managementData) - _ = userAcc.SaveKeyValue([]byte(delegationManagementKey), marshaledData) - _ = n.AccntState.SaveAccount(userAcc) - _, _ = n.AccntState.Commit() + stakingcommon.SaveDelegationManagerConfig(n.AccntState, TestMarshalizer) } } diff --git a/integrationTests/testNetwork.go b/integrationTests/testNetwork.go index e22222d41a7..a08b3aa85c7 100644 --- a/integrationTests/testNetwork.go +++ b/integrationTests/testNetwork.go @@ -34,7 +34,7 @@ type GasScheduleMap = map[string]map[string]uint64 // TestNetwork wraps a set of TestProcessorNodes along with a set of test // Wallets, instantiates them, controls them and provides operations with them; // designed to be used in integration tests. -// TODO combine TestNetwork with the preexisting TestContext and OneNodeNetwork +// TODO combine TestNetwork with the preexisting TestContext and MiniNetwork // into a single struct containing the functionality of all three type TestNetwork struct { NumShards int diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index f63e7093959..a4bc698cef3 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -114,7 +114,9 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -222,6 +224,18 @@ const sizeCheckDelta = 100 // UnreachableEpoch defines an unreachable epoch for integration tests const UnreachableEpoch = uint32(1000000) +// StakingV4Step1EnableEpoch defines the epoch for integration tests when stakingV4 init is enabled +const StakingV4Step1EnableEpoch = 4443 + +// StakingV4Step2EnableEpoch defines the epoch for integration tests when stakingV4 is enabled; should be greater than StakingV2Epoch +const StakingV4Step2EnableEpoch = 4444 + +// StakingV4Step3EnableEpoch defines the epoch for integration tests when nodes distribution from auction to waiting list is enabled in staking v4 +const 
StakingV4Step3EnableEpoch = 4445 + +// ScheduledMiniBlocksEnableEpoch defines the epoch for integration tests when scheduled nini blocks are enabled +const ScheduledMiniBlocksEnableEpoch = 1000 + // TestSingleSigner defines a Ed25519Signer var TestSingleSigner = &ed25519SingleSig.Ed25519Signer{} @@ -483,7 +497,7 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { } if args.RoundsConfig == nil { - defaultRoundsConfig := GetDefaultRoundsConfig() + defaultRoundsConfig := testscommon.GetDefaultRoundsConfig() args.RoundsConfig = &defaultRoundsConfig } genericRoundNotifier := forking.NewGenericRoundNotifier() @@ -653,7 +667,7 @@ func (tpn *TestProcessorNode) initValidatorStatistics() { rater, _ := rating.NewBlockSigningRater(tpn.RatingsData) if check.IfNil(tpn.NodesSetup) { - tpn.NodesSetup = &mock.NodesSetupStub{ + tpn.NodesSetup = &genesisMocks.NodesSetupStub{ MinNumberOfNodesCalled: func() uint32 { return tpn.ShardCoordinator.NumberOfShards() * 2 }, @@ -943,6 +957,8 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -953,12 +969,19 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str MinServiceFee: 0, MaxServiceFee: 100000, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: tpn.PeerState, UserAccountsDB: tpn.AccntState, ChanceComputer: tpn.NodesCoordinator, ShardCoordinator: tpn.ShardCoordinator, EnableEpochsHandler: tpn.EnableEpochsHandler, + NodesCoordinator: tpn.NodesCoordinator, } tpn.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: tpn.EnableEpochs.DelegationSmartContractEnableEpoch, @@ -981,6 +1004,7 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str WasmVMChangeLocker: tpn.WasmVMChangeLocker, ESDTTransferParser: esdtTransferParser, Hasher: TestHasher, + PubKeyConverter: TestAddressPubkeyConverter, } vmFactory, _ = shard.NewVMContainerFactory(argsNewVMFactory) } @@ -1082,11 +1106,10 @@ func (tpn *TestProcessorNode) initChainHandler() { func (tpn *TestProcessorNode) initEconomicsData(economicsConfig *config.EconomicsConfig) { tpn.EnableEpochs.PenalizedTooMuchGasEnableEpoch = 0 argsNewEconomicsData := economics.ArgsNewEconomicsData{ - Economics: economicsConfig, - EpochNotifier: tpn.EpochNotifier, - EnableEpochsHandler: tpn.EnableEpochsHandler, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + Economics: economicsConfig, + EpochNotifier: tpn.EpochNotifier, + EnableEpochsHandler: tpn.EnableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) tpn.EconomicsData = economics.NewTestEconomicsData(economicsData) @@ -1241,7 +1264,7 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { tpn.EpochStartNotifier = notifier.NewEpochStartSubscriptionHandler() } - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.TxMarshalizerField = TestTxSignMarshalizer 
coreComponents.HasherField = TestHasher @@ -1266,6 +1289,12 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { cryptoComponents.BlKeyGen = tpn.OwnAccount.KeygenBlockSign cryptoComponents.TxKeyGen = tpn.OwnAccount.KeygenTxSign + relayedV3TxProcessor, _ := transaction.NewRelayedTxV3Processor(transaction.ArgRelayedTxV3Processor{ + EconomicsFee: tpn.EconomicsData, + ShardCoordinator: tpn.ShardCoordinator, + MaxTransactionsAllowed: 10, + }) + if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { argsEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ GenesisTime: tpn.RoundHandler.TimeStamp(), @@ -1318,6 +1347,7 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { FullArchivePeerShardMapper: tpn.FullArchivePeerShardMapper, HardforkTrigger: tpn.HardforkTrigger, NodeOperationMode: tpn.NodeOperationMode, + RelayedTxV3Processor: relayedV3TxProcessor, } interceptorContainerFactory, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorContainerFactoryArgs) @@ -1386,6 +1416,7 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { FullArchivePeerShardMapper: tpn.FullArchivePeerShardMapper, HardforkTrigger: tpn.HardforkTrigger, NodeOperationMode: tpn.NodeOperationMode, + RelayedTxV3Processor: relayedV3TxProcessor, } interceptorContainerFactory, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(shardIntereptorContainerFactoryArgs) @@ -1435,22 +1466,23 @@ func (tpn *TestProcessorNode) initResolvers() { fullArchivePreferredPeersHolder, _ := p2pFactory.NewPeersHolder([]string{}) resolverContainerFactory := resolverscontainer.FactoryArgs{ - ShardCoordinator: tpn.ShardCoordinator, - MainMessenger: tpn.MainMessenger, - FullArchiveMessenger: tpn.FullArchiveMessenger, - Store: tpn.Storage, - Marshalizer: TestMarshalizer, - DataPools: tpn.DataPool, - Uint64ByteSliceConverter: TestUint64Converter, - DataPacker: dataPacker, - TriesContainer: tpn.TrieContainer, - SizeCheckDelta: 100, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - NumConcurrentResolvingJobs: 10, - MainPreferredPeersHolder: preferredPeersHolder, - FullArchivePreferredPeersHolder: fullArchivePreferredPeersHolder, - PayloadValidator: payloadValidator, + ShardCoordinator: tpn.ShardCoordinator, + MainMessenger: tpn.MainMessenger, + FullArchiveMessenger: tpn.FullArchiveMessenger, + Store: tpn.Storage, + Marshalizer: TestMarshalizer, + DataPools: tpn.DataPool, + Uint64ByteSliceConverter: TestUint64Converter, + DataPacker: dataPacker, + TriesContainer: tpn.TrieContainer, + SizeCheckDelta: 100, + InputAntifloodHandler: &mock.NilAntifloodHandler{}, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + MainPreferredPeersHolder: preferredPeersHolder, + FullArchivePreferredPeersHolder: fullArchivePreferredPeersHolder, + PayloadValidator: payloadValidator, } var err error @@ -1527,7 +1559,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u } if tpn.ValidatorStatisticsProcessor == nil { - tpn.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + tpn.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ UpdatePeerStateCalled: func(header data.MetaHeaderHandler) ([]byte, error) { return []byte("validator statistics root hash"), nil }, @@ -1636,6 +1668,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u 
WasmVMChangeLocker: tpn.WasmVMChangeLocker, ESDTTransferParser: esdtTransferParser, Hasher: TestHasher, + PubKeyConverter: TestAddressPubkeyConverter, } vmFactory, _ := shard.NewVMContainerFactory(argsNewVMFactory) @@ -1652,7 +1685,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u mockVM.GasForOperation = OpGasValueForMockVm _ = tpn.VMContainer.Add(procFactory.InternalTestingVM, mockVM) - tpn.FeeAccumulator, _ = postprocess.NewFeeAccumulator() + tpn.FeeAccumulator = postprocess.NewFeeAccumulator() tpn.ArgsParser = smartContract.NewArgumentParser() argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ @@ -1664,56 +1697,66 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u EnableEpochsHandler: tpn.EnableEpochsHandler, } txTypeHandler, _ := coordinator.NewTxTypeHandler(argsTxTypeHandler) + _ = tpn.EconomicsData.SetTxTypeHandler(txTypeHandler) tpn.GasHandler, _ = preprocess.NewGasComputation(tpn.EconomicsData, txTypeHandler, tpn.EnableEpochsHandler) badBlocksHandler, _ := tpn.InterimProcContainer.Get(dataBlock.InvalidBlock) argsNewScProcessor := scrCommon.ArgsNewSmartContractProcessor{ - VmContainer: tpn.VMContainer, - ArgsParser: tpn.ArgsParser, - Hasher: TestHasher, - Marshalizer: TestMarshalizer, - AccountsDB: tpn.AccntState, - BlockChainHook: vmFactory.BlockChainHookImpl(), - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - PubkeyConv: TestAddressPubkeyConverter, - ShardCoordinator: tpn.ShardCoordinator, - ScrForwarder: tpn.ScrForwarder, - TxFeeHandler: tpn.FeeAccumulator, - EconomicsFee: tpn.EconomicsData, - TxTypeHandler: txTypeHandler, - GasHandler: tpn.GasHandler, - GasSchedule: gasSchedule, - TxLogsProcessor: tpn.TransactionLogProcessor, - BadTxForwarder: badBlocksHandler, - EnableRoundsHandler: tpn.EnableRoundsHandler, - EnableEpochsHandler: tpn.EnableEpochsHandler, - VMOutputCacher: txcache.NewDisabledCache(), - WasmVMChangeLocker: tpn.WasmVMChangeLocker, + VmContainer: tpn.VMContainer, + ArgsParser: tpn.ArgsParser, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + AccountsDB: tpn.AccntState, + BlockChainHook: vmFactory.BlockChainHookImpl(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + PubkeyConv: TestAddressPubkeyConverter, + ShardCoordinator: tpn.ShardCoordinator, + ScrForwarder: tpn.ScrForwarder, + TxFeeHandler: tpn.FeeAccumulator, + EconomicsFee: tpn.EconomicsData, + TxTypeHandler: txTypeHandler, + GasHandler: tpn.GasHandler, + GasSchedule: gasSchedule, + TxLogsProcessor: tpn.TransactionLogProcessor, + BadTxForwarder: badBlocksHandler, + EnableRoundsHandler: tpn.EnableRoundsHandler, + EnableEpochsHandler: tpn.EnableEpochsHandler, + VMOutputCacher: txcache.NewDisabledCache(), + WasmVMChangeLocker: tpn.WasmVMChangeLocker, + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } tpn.ScProcessor, _ = processProxy.NewTestSmartContractProcessorProxy(argsNewScProcessor, tpn.EpochNotifier) + relayedV3TxProcessor, _ := transaction.NewRelayedTxV3Processor(transaction.ArgRelayedTxV3Processor{ + EconomicsFee: tpn.EconomicsData, + ShardCoordinator: tpn.ShardCoordinator, + MaxTransactionsAllowed: 10, + }) + receiptsHandler, _ := tpn.InterimProcContainer.Get(dataBlock.ReceiptBlock) argsNewTxProcessor := transaction.ArgsNewTxProcessor{ - Accounts: tpn.AccntState, - Hasher: TestHasher, - PubkeyConv: TestAddressPubkeyConverter, - Marshalizer: TestMarshalizer, - SignMarshalizer: TestTxSignMarshalizer, - ShardCoordinator: tpn.ShardCoordinator, - ScProcessor: 
tpn.ScProcessor, - TxFeeHandler: tpn.FeeAccumulator, - TxTypeHandler: txTypeHandler, - EconomicsFee: tpn.EconomicsData, - ReceiptForwarder: receiptsHandler, - BadTxForwarder: badBlocksHandler, - ArgsParser: tpn.ArgsParser, - ScrForwarder: tpn.ScrForwarder, - EnableRoundsHandler: tpn.EnableRoundsHandler, - EnableEpochsHandler: tpn.EnableEpochsHandler, - GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - TxLogsProcessor: tpn.TransactionLogProcessor, + Accounts: tpn.AccntState, + Hasher: TestHasher, + PubkeyConv: TestAddressPubkeyConverter, + Marshalizer: TestMarshalizer, + SignMarshalizer: TestTxSignMarshalizer, + ShardCoordinator: tpn.ShardCoordinator, + ScProcessor: tpn.ScProcessor, + TxFeeHandler: tpn.FeeAccumulator, + TxTypeHandler: txTypeHandler, + EconomicsFee: tpn.EconomicsData, + ReceiptForwarder: receiptsHandler, + BadTxForwarder: badBlocksHandler, + ArgsParser: tpn.ArgsParser, + ScrForwarder: tpn.ScrForwarder, + EnableRoundsHandler: tpn.EnableRoundsHandler, + EnableEpochsHandler: tpn.EnableEpochsHandler, + GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxLogsProcessor: tpn.TransactionLogProcessor, + RelayedTxV3Processor: relayedV3TxProcessor, + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } tpn.TxProcessor, _ = transaction.NewTxProcessor(argsNewTxProcessor) scheduledSCRsStorer, _ := tpn.Storage.GetStorer(dataRetriever.ScheduledSCRsUnit) @@ -1899,6 +1942,8 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -1909,12 +1954,19 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri MinServiceFee: 0, MaxServiceFee: 100000, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: tpn.PeerState, UserAccountsDB: tpn.AccntState, ChanceComputer: &mock.RaterMock{}, ShardCoordinator: tpn.ShardCoordinator, EnableEpochsHandler: tpn.EnableEpochsHandler, + NodesCoordinator: tpn.NodesCoordinator, } vmFactory, _ := metaProcess.NewVMContainerFactory(argsVMContainerFactory) @@ -1924,7 +1976,7 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri tpn.SystemSCFactory = vmFactory.SystemSmartContractContainerFactory() tpn.addMockVm(tpn.BlockchainHook) - tpn.FeeAccumulator, _ = postprocess.NewFeeAccumulator() + tpn.FeeAccumulator = postprocess.NewFeeAccumulator() tpn.ArgsParser = smartContract.NewArgumentParser() esdtTransferParser, _ := parsers.NewESDTTransferParser(TestMarshalizer) argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ @@ -1936,30 +1988,32 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri EnableEpochsHandler: tpn.EnableEpochsHandler, } txTypeHandler, _ := coordinator.NewTxTypeHandler(argsTxTypeHandler) + _ = tpn.EconomicsData.SetTxTypeHandler(txTypeHandler) tpn.GasHandler, _ = preprocess.NewGasComputation(tpn.EconomicsData, txTypeHandler, tpn.EnableEpochsHandler) badBlocksHandler, _ := tpn.InterimProcContainer.Get(dataBlock.InvalidBlock) argsNewScProcessor := scrCommon.ArgsNewSmartContractProcessor{ - VmContainer: 
tpn.VMContainer, - ArgsParser: tpn.ArgsParser, - Hasher: TestHasher, - Marshalizer: TestMarshalizer, - AccountsDB: tpn.AccntState, - BlockChainHook: vmFactory.BlockChainHookImpl(), - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - PubkeyConv: TestAddressPubkeyConverter, - ShardCoordinator: tpn.ShardCoordinator, - ScrForwarder: tpn.ScrForwarder, - TxFeeHandler: tpn.FeeAccumulator, - EconomicsFee: tpn.EconomicsData, - TxTypeHandler: txTypeHandler, - GasHandler: tpn.GasHandler, - GasSchedule: gasSchedule, - TxLogsProcessor: tpn.TransactionLogProcessor, - BadTxForwarder: badBlocksHandler, - EnableRoundsHandler: tpn.EnableRoundsHandler, - EnableEpochsHandler: tpn.EnableEpochsHandler, - VMOutputCacher: txcache.NewDisabledCache(), - WasmVMChangeLocker: tpn.WasmVMChangeLocker, + VmContainer: tpn.VMContainer, + ArgsParser: tpn.ArgsParser, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + AccountsDB: tpn.AccntState, + BlockChainHook: vmFactory.BlockChainHookImpl(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + PubkeyConv: TestAddressPubkeyConverter, + ShardCoordinator: tpn.ShardCoordinator, + ScrForwarder: tpn.ScrForwarder, + TxFeeHandler: tpn.FeeAccumulator, + EconomicsFee: tpn.EconomicsData, + TxTypeHandler: txTypeHandler, + GasHandler: tpn.GasHandler, + GasSchedule: gasSchedule, + TxLogsProcessor: tpn.TransactionLogProcessor, + BadTxForwarder: badBlocksHandler, + EnableRoundsHandler: tpn.EnableRoundsHandler, + EnableEpochsHandler: tpn.EnableEpochsHandler, + VMOutputCacher: txcache.NewDisabledCache(), + WasmVMChangeLocker: tpn.WasmVMChangeLocker, + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } tpn.ScProcessor, _ = processProxy.NewTestSmartContractProcessorProxy(argsNewScProcessor, tpn.EpochNotifier) @@ -2071,7 +2125,7 @@ func (tpn *TestProcessorNode) InitDelegationManager() { log.Error("error while initializing system SC", "return code", vmOutput.ReturnCode) } - err = tpn.processSCOutputAccounts(vmOutput) + err = ProcessSCOutputAccounts(vmOutput, tpn.AccntState) log.LogIfError(err) err = tpn.updateSystemSCContractsCode(vmInput.ContractCodeMetadata, vm.DelegationManagerSCAddress) @@ -2094,39 +2148,6 @@ func (tpn *TestProcessorNode) updateSystemSCContractsCode(contractMetadata []byt return tpn.AccntState.SaveAccount(userAcc) } -// save account changes in state from vmOutput - protected by VM - every output can be treated as is. 
-func (tpn *TestProcessorNode) processSCOutputAccounts(vmOutput *vmcommon.VMOutput) error { - outputAccounts := process.SortVMOutputInsideData(vmOutput) - for _, outAcc := range outputAccounts { - acc, err := tpn.getUserAccount(outAcc.Address) - if err != nil { - return err - } - - storageUpdates := process.GetSortedStorageUpdates(outAcc) - for _, storeUpdate := range storageUpdates { - err = acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) - if err != nil { - return err - } - } - - if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { - err = acc.AddToBalance(outAcc.BalanceDelta) - if err != nil { - return err - } - } - - err = tpn.AccntState.SaveAccount(acc) - if err != nil { - return err - } - } - - return nil -} - func (tpn *TestProcessorNode) getUserAccount(address []byte) (state.UserAccountHandler, error) { acnt, err := tpn.AccntState.LoadAccount(address) if err != nil { @@ -2161,7 +2182,7 @@ func (tpn *TestProcessorNode) initBlockProcessor() { accountsDb[state.UserAccountsState] = tpn.AccntState accountsDb[state.PeerAccountsState] = tpn.PeerState - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.HasherField = TestHasher coreComponents.Uint64ByteSliceConverterField = TestUint64Converter @@ -2214,6 +2235,7 @@ func (tpn *TestProcessorNode) initBlockProcessor() { OutportDataProvider: &outport.OutportDataProviderStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, } if check.IfNil(tpn.EpochStartNotifier) { @@ -2288,7 +2310,13 @@ func (tpn *TestProcessorNode) initBlockProcessor() { if errGet != nil { log.Error("initBlockProcessor tpn.VMContainer.Get", "error", errGet) } - stakingDataProvider, errRsp := metachain.NewStakingDataProvider(systemVM, "1000") + + argsStakingDataProvider := metachain.StakingDataProviderArgs{ + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + SystemVM: systemVM, + MinNodePrice: "1000", + } + stakingDataProvider, errRsp := metachain.NewStakingDataProvider(argsStakingDataProvider) if errRsp != nil { log.Error("initBlockProcessor NewRewardsStakingProvider", "error", errRsp) } @@ -2327,23 +2355,52 @@ func (tpn *TestProcessorNode) initBlockProcessor() { EnableEpochsHandler: tpn.EnableEpochsHandler, } epochStartValidatorInfo, _ := metachain.NewValidatorInfoCreator(argsEpochValidatorInfo) + + maxNodesChangeConfigProvider, _ := notifier.NewNodesConfigProvider( + tpn.EpochNotifier, + nil, + ) + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := metachain.NewAuctionListDisplayer(metachain.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachain.NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + }) + + argsAuctionListSelector := metachain.AuctionListSelectorArgs{ + ShardCoordinator: tpn.ShardCoordinator, + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + auctionListSelector, _ := metachain.NewAuctionListSelector(argsAuctionListSelector) + argsEpochSystemSC := 
metachain.ArgsNewEpochStartSystemSCProcessing{ - SystemVM: systemVM, - UserAccountsDB: tpn.AccntState, - PeerAccountsDB: tpn.PeerState, - Marshalizer: TestMarshalizer, - StartRating: tpn.RatingsData.StartRating(), - ValidatorInfoCreator: tpn.ValidatorStatisticsProcessor, - EndOfEpochCallerAddress: vm.EndOfEpochAddress, - StakingSCAddress: vm.StakingSCAddress, - ChanceComputer: tpn.NodesCoordinator, - EpochNotifier: tpn.EpochNotifier, - GenesisNodesConfig: tpn.NodesSetup, - StakingDataProvider: stakingDataProvider, - NodesConfigProvider: tpn.NodesCoordinator, - ShardCoordinator: tpn.ShardCoordinator, - ESDTOwnerAddressBytes: vm.EndOfEpochAddress, - EnableEpochsHandler: tpn.EnableEpochsHandler, + SystemVM: systemVM, + UserAccountsDB: tpn.AccntState, + PeerAccountsDB: tpn.PeerState, + Marshalizer: TestMarshalizer, + StartRating: tpn.RatingsData.StartRating(), + ValidatorInfoCreator: tpn.ValidatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: tpn.NodesCoordinator, + EpochNotifier: tpn.EpochNotifier, + GenesisNodesConfig: tpn.NodesSetup, + StakingDataProvider: stakingDataProvider, + NodesConfigProvider: tpn.NodesCoordinator, + ShardCoordinator: tpn.ShardCoordinator, + ESDTOwnerAddressBytes: vm.EndOfEpochAddress, + EnableEpochsHandler: tpn.EnableEpochsHandler, + AuctionListSelector: auctionListSelector, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, } epochStartSystemSCProcessor, _ := metachain.NewSystemSCProcessor(argsEpochSystemSC) tpn.EpochStartSystemSCProcessor = epochStartSystemSCProcessor @@ -2427,7 +2484,7 @@ func (tpn *TestProcessorNode) initNode() { AppStatusHandlerField: tpn.AppStatusHandler, } - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.VmMarshalizerField = TestVmMarshalizer coreComponents.TxMarshalizerField = TestTxSignMarshalizer @@ -2559,21 +2616,22 @@ func (tpn *TestProcessorNode) SendTransaction(tx *dataTransaction.Transaction) ( guardianAddress = TestAddressPubkeyConverter.SilentEncode(tx.GuardianAddr, log) } createTxArgs := &external.ArgsCreateTransaction{ - Nonce: tx.Nonce, - Value: tx.Value.String(), - Receiver: encodedRcvAddr, - ReceiverUsername: nil, - Sender: encodedSndAddr, - SenderUsername: nil, - GasPrice: tx.GasPrice, - GasLimit: tx.GasLimit, - DataField: tx.Data, - SignatureHex: hex.EncodeToString(tx.Signature), - ChainID: string(tx.ChainID), - Version: tx.Version, - Options: tx.Options, - Guardian: guardianAddress, - GuardianSigHex: hex.EncodeToString(tx.GuardianSignature), + Nonce: tx.Nonce, + Value: tx.Value.String(), + Receiver: encodedRcvAddr, + ReceiverUsername: nil, + Sender: encodedSndAddr, + SenderUsername: nil, + GasPrice: tx.GasPrice, + GasLimit: tx.GasLimit, + DataField: tx.Data, + SignatureHex: hex.EncodeToString(tx.Signature), + ChainID: string(tx.ChainID), + Version: tx.Version, + Options: tx.Options, + Guardian: guardianAddress, + GuardianSigHex: hex.EncodeToString(tx.GuardianSignature), + InnerTransactions: tx.InnerTransactions, } tx, txHash, err := tpn.Node.CreateTransaction(createTxArgs) if err != nil { @@ -3065,14 +3123,14 @@ func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger() { processComponents.ShardCoord = tpn.ShardCoordinator processComponents.IntContainer = tpn.MainInterceptorsContainer processComponents.FullArchiveIntContainer = tpn.FullArchiveInterceptorsContainer - 
processComponents.ValidatorStatistics = &mock.ValidatorStatisticsProcessorStub{ - GetValidatorInfoForRootHashCalled: func(_ []byte) (map[uint32][]*state.ValidatorInfo, error) { - return map[uint32][]*state.ValidatorInfo{ - 0: {{PublicKey: []byte("pk0")}}, - }, nil + processComponents.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + GetValidatorInfoForRootHashCalled: func(_ []byte) (state.ShardValidatorsInfoMapHandler, error) { + ret := state.NewShardValidatorsInfoMap() + _ = ret.Add(&state.ValidatorInfo{PublicKey: []byte("pk0")}) + return ret, nil }, } - processComponents.ValidatorProvider = &mock.ValidatorsProviderStub{} + processComponents.ValidatorProvider = &stakingcommon.ValidatorsProviderStub{} processComponents.EpochTrigger = tpn.EpochStartTrigger processComponents.EpochNotifier = tpn.EpochStartNotifier processComponents.WhiteListerVerifiedTxsInternal = tpn.WhiteListerVerifiedTxs @@ -3187,12 +3245,10 @@ func CreateEnableEpochsConfig() config.EnableEpochs { SaveJailedAlwaysEnableEpoch: UnreachableEpoch, ValidatorToDelegationEnableEpoch: UnreachableEpoch, ReDelegateBelowMinCheckEnableEpoch: UnreachableEpoch, - WaitingListFixEnableEpoch: UnreachableEpoch, IncrementSCRNonceInMultiTransferEnableEpoch: UnreachableEpoch, ESDTMultiTransferEnableEpoch: UnreachableEpoch, GlobalMintBurnDisableEpoch: UnreachableEpoch, ESDTTransferRoleEnableEpoch: UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: UnreachableEpoch, ComputeRewardCheckpointEnableEpoch: UnreachableEpoch, SCRSizeInvariantCheckEnableEpoch: UnreachableEpoch, BackwardCompSaveKeyValueEnableEpoch: UnreachableEpoch, @@ -3223,14 +3279,15 @@ func CreateEnableEpochsConfig() config.EnableEpochs { MiniBlockPartialExecutionEnableEpoch: UnreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: UnreachableEpoch, SCProcessorV2EnableEpoch: UnreachableEpoch, + RelayedTransactionsV3EnableEpoch: UnreachableEpoch, + FixRelayedBaseCostEnableEpoch: UnreachableEpoch, } } // GetDefaultCoreComponents - -func GetDefaultCoreComponents() *mock.CoreComponentsStub { - enableEpochsCfg := CreateEnableEpochsConfig() +func GetDefaultCoreComponents(enableEpochsConfig config.EnableEpochs) *mock.CoreComponentsStub { genericEpochNotifier := forking.NewGenericEpochNotifier() - enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsCfg, genericEpochNotifier) + enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, genericEpochNotifier) return &mock.CoreComponentsStub{ InternalMarshalizerField: TestMarshalizer, @@ -3256,7 +3313,7 @@ func GetDefaultCoreComponents() *mock.CoreComponentsStub { EconomicsDataField: &economicsmocks.EconomicsHandlerStub{}, RatingsDataField: &testscommon.RatingsInfoMock{}, RaterField: &testscommon.RaterMock{}, - GenesisNodesSetupField: &testscommon.NodesSetupStub{}, + GenesisNodesSetupField: &genesisMocks.NodesSetupStub{}, GenesisTimeField: time.Time{}, EpochNotifierField: genericEpochNotifier, EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, @@ -3285,8 +3342,8 @@ func GetDefaultProcessComponents() *mock.ProcessComponentsStub { BootSore: &mock.BoostrapStorerMock{}, HeaderSigVerif: &mock.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorStub{}, - ValidatorProvider: &mock.ValidatorsProviderStub{}, + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{}, + ValidatorProvider: &stakingcommon.ValidatorsProviderStub{}, BlockTrack: &mock.BlockTrackerStub{}, PendingMiniBlocksHdl: 
&mock.PendingMiniBlocksHandlerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, @@ -3309,6 +3366,11 @@ func GetDefaultProcessComponents() *mock.ProcessComponentsStub { CurrentEpochProviderInternal: &testscommon.CurrentEpochProviderStub{}, HistoryRepositoryInternal: &dblookupextMock.HistoryRepositoryStub{}, HardforkTriggerField: &testscommon.HardforkTriggerStub{}, + RelayedTxV3ProcessorField: &processMocks.RelayedTxV3ProcessorMock{ + CheckRelayedTxCalled: func(tx *dataTransaction.Transaction) error { + return nil + }, + }, } } @@ -3469,11 +3531,12 @@ func getDefaultVMConfig() *config.VirtualMachineConfig { WasmVMVersions: []config.WasmVMVersionByEpoch{ {StartEpoch: 0, Version: "*"}, }, + TransferAndExecuteByUserAddresses: []string{"erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe3"}, } } func getDefaultNodesSetup(maxShards, numNodes uint32, address []byte, pksBytes map[uint32][]byte) sharding.GenesisNodesSetupHandler { - return &mock.NodesSetupStub{ + return &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) for i := uint32(0); i < maxShards; i++ { @@ -3496,7 +3559,7 @@ func getDefaultNodesSetup(maxShards, numNodes uint32, address []byte, pksBytes m func getDefaultNodesCoordinator(maxShards uint32, pksBytes map[uint32][]byte) nodesCoordinator.NodesCoordinator { return &shardingMocks.NodesCoordinatorStub{ - ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validators []nodesCoordinator.Validator, err error) { + ComputeConsensusGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validators []nodesCoordinator.Validator, err error) { v, _ := nodesCoordinator.NewValidator(pksBytes[shardId], 1, defaultChancesSelection) return []nodesCoordinator.Validator{v}, nil }, @@ -3526,16 +3589,7 @@ func GetDefaultEnableEpochsConfig() *config.EnableEpochs { MiniBlockPartialExecutionEnableEpoch: UnreachableEpoch, FailExecutionOnEveryAPIErrorEnableEpoch: UnreachableEpoch, DynamicGasCostForDataTrieStorageLoadEnableEpoch: UnreachableEpoch, - } -} - -// GetDefaultRoundsConfig - -func GetDefaultRoundsConfig() config.RoundConfig { - return config.RoundConfig{ - RoundActivations: map[string]config.ActivationRoundByName{ - "DisableAsyncCallV1": { - Round: "18446744073709551615", - }, - }, + StakingV4Step1EnableEpoch: UnreachableEpoch, + StakingV4Step2EnableEpoch: UnreachableEpoch, } } diff --git a/integrationTests/testProcessorNodeWithCoordinator.go b/integrationTests/testProcessorNodeWithCoordinator.go index 71f6c3afd51..63392658a76 100644 --- a/integrationTests/testProcessorNodeWithCoordinator.go +++ b/integrationTests/testProcessorNodeWithCoordinator.go @@ -13,8 +13,8 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage/cache" - "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" ) @@ -48,7 +48,7 @@ func CreateProcessorNodesWithNodesCoordinator( waitingMap := GenValidatorsFromPubKeys(pubKeysWaiting, 
nbShards) waitingMapForNodesCoordinator, _ := nodesCoordinator.NodesInfoToValidators(waitingMap) - nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + nodesSetup := &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }} @@ -76,7 +76,7 @@ func CreateProcessorNodesWithNodesCoordinator( IsFullArchive: false, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index d7de5cc05cc..42f08a62b39 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -31,6 +31,8 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" @@ -89,7 +91,7 @@ func CreateNodesWithNodesCoordinatorAndTxKeys( } waitingMapForNodesCoordinator[core.MetachainShardId] = make([]nodesCoordinator.Validator, 0) - nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + nodesSetup := &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }} @@ -221,7 +223,7 @@ func CreateNodesWithNodesCoordinatorFactory( numNodes := nbShards*nodesPerShard + nbMetaNodes - nodesSetup := &mock.NodesSetupStub{ + nodesSetup := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }, @@ -236,6 +238,9 @@ func CreateNodesWithNodesCoordinatorFactory( MiniBlockPartialExecutionEnableEpoch: UnreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: UnreachableEpoch, DynamicGasCostForDataTrieStorageLoadEnableEpoch: UnreachableEpoch, + StakingV4Step1EnableEpoch: UnreachableEpoch, + StakingV4Step2EnableEpoch: UnreachableEpoch, + StakingV4Step3EnableEpoch: UnreachableEpoch, } nodesMap := make(map[uint32][]*TestProcessorNode) @@ -406,34 +411,39 @@ func CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( epochStartSubscriber := notifier.NewEpochStartSubscriptionHandler() bootStorer := CreateMemUnit() - nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m 
map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + nodesSetup := &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, nil }} + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &marshallerMock.MarshalizerMock{}, + StakingV4Step2EnableEpoch, + ) completeNodesList := make([]Connectable, 0) for shardId, validatorList := range validatorsMap { consensusCache, _ := cache.NewLRUCache(10000) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - Shuffler: nodeShuffler, - BootStorer: bootStorer, - EpochStartNotifier: epochStartSubscriber, - ShardIDAsObserver: shardId, - NbShards: uint32(nbShards), - EligibleNodes: validatorsMapForNodesCoordinator, - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: consensusCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + EpochStartNotifier: epochStartSubscriber, + ShardIDAsObserver: shardId, + NbShards: uint32(nbShards), + EligibleNodes: validatorsMapForNodesCoordinator, + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: consensusCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) @@ -519,37 +529,42 @@ func CreateNodesWithNodesCoordinatorKeygenAndSingleSigner( epochStartSubscriber := notifier.NewEpochStartSubscriptionHandler() nodeShuffler := &shardingMocks.NodeShufflerMock{} - nodesSetup := &mock.NodesSetupStub{ + nodesSetup := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }, } + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &marshallerMock.MarshalizerMock{}, + StakingV4Step2EnableEpoch, + ) completeNodesList := make([]Connectable, 0) for shardId, validatorList := range validatorsMap { bootStorer := CreateMemUnit() lruCache, _ := cache.NewLRUCache(10000) argumentsNodesCoordinator := 
nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardId, - NbShards: uint32(nbShards), - EligibleNodes: validatorsMapForNodesCoordinator, - WaitingNodes: waitingMapForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardId, + NbShards: uint32(nbShards), + EligibleNodes: validatorsMapForNodesCoordinator, + WaitingNodes: waitingMapForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } nodesCoord, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testProcessorNodeWithTestWebServer.go b/integrationTests/testProcessorNodeWithTestWebServer.go index f1a11c9d72a..b380a643660 100644 --- a/integrationTests/testProcessorNodeWithTestWebServer.go +++ b/integrationTests/testProcessorNodeWithTestWebServer.go @@ -101,7 +101,7 @@ func createFacadeArg(tpn *TestProcessorNode) nodeFacade.ArgNodeFacade { func createTestApiConfig() config.ApiRoutesConfig { routes := map[string][]string{ - "node": {"/status", "/metrics", "/heartbeatstatus", "/statistics", "/p2pstatus", "/debug", "/peerinfo", "/bootstrapstatus", "/connected-peers-ratings", "/managed-keys/count", "/managed-keys", "/managed-keys/eligible", "/managed-keys/waiting", "/waiting-epochs-left/:key"}, + "node": {"/status", "/metrics", "/heartbeatstatus", "/statistics", "/p2pstatus", "/debug", "/peerinfo", "/bootstrapstatus", "/connected-peers-ratings", "/managed-keys/count", "/managed-keys", "/loaded-keys", "/managed-keys/eligible", "/managed-keys/waiting", "/waiting-epochs-left/:key"}, "address": {"/:address", "/:address/balance", "/:address/username", "/:address/code-hash", "/:address/key/:key", "/:address/esdt", "/:address/esdt/:tokenIdentifier"}, "hardfork": {"/trigger"}, "network": {"/status", "/total-staked", "/economics", "/config"}, @@ -162,6 +162,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { } txTypeHandler, err := coordinator.NewTxTypeHandler(argsTxTypeHandler) log.LogIfError(err) + _ = tpn.EconomicsData.SetTxTypeHandler(txTypeHandler) argsDataFieldParser := 
&datafield.ArgsOperationDataFieldParser{ AddressLength: TestAddressPubkeyConverter.Len(), @@ -179,6 +180,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { Hasher: TestHasher, VMOutputCacher: &testscommon.CacherMock{}, DataFieldParser: dataFieldParser, + BlockChainHook: tpn.BlockchainHook, } txSimulator, err := transactionEvaluator.NewTransactionSimulator(argSimulator) @@ -194,6 +196,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { Accounts: wrappedAccounts, ShardCoordinator: tpn.ShardCoordinator, EnableEpochsHandler: tpn.EnableEpochsHandler, + BlockChain: tpn.BlockChain, } apiTransactionEvaluator, err := transactionEvaluator.NewAPITransactionEvaluator(argsTransactionEvaluator) log.LogIfError(err) @@ -273,7 +276,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { APITransactionHandler: apiTransactionHandler, APIBlockHandler: blockAPIHandler, APIInternalBlockHandler: apiInternalBlockProcessor, - GenesisNodesSetupHandler: &mock.NodesSetupStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, AccountsParser: &genesisMocks.AccountsParserStub{}, GasScheduleNotifier: &testscommon.GasScheduleNotifierMock{}, diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 6512c5a95e6..b28d5e3f953 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -45,7 +45,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { accountsDb[state.UserAccountsState] = tpn.AccntState accountsDb[state.PeerAccountsState] = tpn.PeerState - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.HasherField = TestHasher coreComponents.Uint64ByteSliceConverterField = TestUint64Converter @@ -104,6 +104,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { OutportDataProvider: &outport.OutportDataProviderStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { @@ -116,14 +117,14 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, EpochStartDataCreator: &mock.EpochStartDataCreatorStub{}, EpochEconomics: &mock.EpochEconomicsStub{}, - EpochRewardsCreator: &mock.EpochRewardsCreatorStub{}, - EpochValidatorInfoCreator: &mock.EpochValidatorInfoCreatorStub{}, - ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{ + EpochRewardsCreator: &testscommon.RewardsCreatorStub{}, + EpochValidatorInfoCreator: &testscommon.EpochValidatorInfoCreatorStub{}, + ValidatorStatisticsProcessor: &testscommon.ValidatorStatisticsProcessorStub{ UpdatePeerStateCalled: func(header data.MetaHeaderHandler) ([]byte, error) { return []byte("validator stats root hash"), nil }, }, - EpochSystemSCProcessor: &mock.EpochStartSystemSCStub{}, + EpochSystemSCProcessor: &testscommon.EpochStartSystemSCStub{}, } tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) diff --git a/integrationTests/vm/delegation/changeOwner_test.go b/integrationTests/vm/delegation/changeOwner_test.go index 2b23993882d..c634452ea9c 100644 --- a/integrationTests/vm/delegation/changeOwner_test.go +++ 
b/integrationTests/vm/delegation/changeOwner_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( @@ -23,6 +21,10 @@ var ( ) func TestDelegationChangeOwnerOnAccountHandler(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + t.Run("fix flag not activated, should not save - backwards compatibility", func(t *testing.T) { _, _, userAccount := testDelegationChangeOwnerOnAccountHandler(t, 1) diff --git a/integrationTests/vm/delegation/delegationMulti_test.go b/integrationTests/vm/delegation/delegationMulti_test.go index 90d307c741d..b0eef67dcaa 100644 --- a/integrationTests/vm/delegation/delegationMulti_test.go +++ b/integrationTests/vm/delegation/delegationMulti_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( @@ -19,6 +17,10 @@ import ( ) func TestDelegationSystemClaimMulti(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -127,6 +129,10 @@ func TestDelegationSystemClaimMulti(t *testing.T) { } func TestDelegationSystemRedelegateMulti(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, diff --git a/integrationTests/vm/delegation/delegationScenarios_test.go b/integrationTests/vm/delegation/delegationScenarios_test.go index e1d58b12d6d..4b9dbd07fba 100644 --- a/integrationTests/vm/delegation/delegationScenarios_test.go +++ b/integrationTests/vm/delegation/delegationScenarios_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( @@ -32,6 +30,10 @@ import ( ) func TestDelegationSystemNodesOperationsTestBackwardComp(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -82,6 +84,10 @@ func TestDelegationSystemNodesOperationsTestBackwardComp(t *testing.T) { } func TestDelegationSystemNodesOperations(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -163,6 +169,10 @@ func TestDelegationSystemNodesOperations(t *testing.T) { } func TestDelegationSystemReStakeNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -230,6 +240,10 @@ func TestDelegationSystemReStakeNodes(t *testing.T) { } func TestDelegationChangeConfig(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -288,6 +302,10 @@ func TestDelegationChangeConfig(t *testing.T) { } func TestDelegationSystemDelegateUnDelegateFromTopUpWithdraw(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -348,6 +366,10 @@ func TestDelegationSystemDelegateUnDelegateFromTopUpWithdraw(t *testing.T) { } func 
TestDelegationSystemDelegateUnDelegateOnlyPartOfDelegation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -409,6 +431,10 @@ func TestDelegationSystemDelegateUnDelegateOnlyPartOfDelegation(t *testing.T) { } func TestDelegationSystemMultipleDelegationContractsAndSameBlsKeysShouldNotWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -483,6 +509,10 @@ func TestDelegationSystemMultipleDelegationContractsAndSameBlsKeysShouldNotWork( } func TestDelegationSystemMultipleDelegationContractsAndSameDelegators(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -551,6 +581,10 @@ func TestDelegationSystemMultipleDelegationContractsAndSameDelegators(t *testing } func TestDelegationRewardsComputationAfterChangeServiceFee(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -655,6 +689,10 @@ func TestDelegationRewardsComputationAfterChangeServiceFee(t *testing.T) { } func TestDelegationUnJail(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -718,6 +756,10 @@ func TestDelegationUnJail(t *testing.T) { } func TestDelegationSystemDelegateSameUsersAFewTimes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -779,6 +821,10 @@ func TestDelegationSystemDelegateSameUsersAFewTimes(t *testing.T) { } func TestDelegationSystemMultipleDelegationContractsAndSameDelegatorsClaimRewardsMultipleTimeUndelegateClaimRewardsMultipleTime(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -931,6 +977,10 @@ func TestDelegationSystemMultipleDelegationContractsAndSameDelegatorsClaimReward } func TestDelegationSystemDelegateUnDelegateReceiveRewardsWhenAllIsUndelegated(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -1069,6 +1119,10 @@ func TestDelegationSystemDelegateUnDelegateReceiveRewardsWhenAllIsUndelegated(t } func TestDelegationSystemCleanUpContract(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, diff --git a/integrationTests/vm/delegation/delegation_test.go b/integrationTests/vm/delegation/delegation_test.go index 65ff98aab2f..9bae5235076 100644 --- a/integrationTests/vm/delegation/delegation_test.go +++ 
b/integrationTests/vm/delegation/delegation_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( diff --git a/integrationTests/vm/esdt/common.go b/integrationTests/vm/esdt/common.go index 3287641d0e6..0d3a798d592 100644 --- a/integrationTests/vm/esdt/common.go +++ b/integrationTests/vm/esdt/common.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/process" vmFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/vm" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -170,7 +171,7 @@ func CreateNodesAndPrepareBalances(numOfShards int) ([]*integrationTests.TestPro ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, } - roundsConfig := integrationTests.GetDefaultRoundsConfig() + roundsConfig := testscommon.GetDefaultRoundsConfig() return CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig( numOfShards, enableEpochs, @@ -178,7 +179,7 @@ func CreateNodesAndPrepareBalances(numOfShards int) ([]*integrationTests.TestPro ) } -// CreateNodesAndPrepareBalances - +// CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig - func CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig(numOfShards int, enableEpochs config.EnableEpochs, roundsConfig config.RoundConfig) ([]*integrationTests.TestProcessorNode, []int) { nodesPerShard := 1 numMetachainNodes := 1 @@ -193,6 +194,7 @@ func CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig(numOfShards int, ena WasmVMVersions: []config.WasmVMVersionByEpoch{ {StartEpoch: 0, Version: "*"}, }, + TransferAndExecuteByUserAddresses: []string{"erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c"}, }, ) @@ -230,6 +232,7 @@ func IssueTestToken(nodes []*integrationTests.TestProcessorNode, initialSupply i issueTestToken(nodes, initialSupply, ticker, core.MinMetaTxExtraGasCost) } +// IssueTestTokenWithIssuerAccount - func IssueTestTokenWithIssuerAccount(nodes []*integrationTests.TestProcessorNode, issuerAccount *integrationTests.TestWalletAccount, initialSupply int64, ticker string) { issueTestTokenWithIssuerAccount(nodes, issuerAccount, initialSupply, ticker, core.MinMetaTxExtraGasCost) } @@ -302,6 +305,7 @@ func CheckNumCallBacks( } } +// CheckForwarderRawSavedCallbackArgs - func CheckForwarderRawSavedCallbackArgs( t *testing.T, address []byte, @@ -338,13 +342,14 @@ func CheckForwarderRawSavedCallbackArgs( } } -/// ForwarderRawSavedPaymentInfo contains token data to be checked in the forwarder-raw contract. +// ForwarderRawSavedPaymentInfo contains token data to be checked in the forwarder-raw contract. 
type ForwarderRawSavedPaymentInfo struct { TokenId string Nonce uint64 Payment *big.Int } +// CheckForwarderRawSavedCallbackPayments - func CheckForwarderRawSavedCallbackPayments( t *testing.T, address []byte, diff --git a/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_MockContracts_test.go b/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_MockContracts_test.go index e5abb053058..c088215b3c0 100644 --- a/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_MockContracts_test.go +++ b/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_MockContracts_test.go @@ -1,5 +1,3 @@ -//go:build !race - package localFuncs import ( diff --git a/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_test.go b/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_test.go index c5e9da76d9b..742531fb801 100644 --- a/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_test.go +++ b/integrationTests/vm/esdt/localFuncs/esdtLocalFunsSC_test.go @@ -1,5 +1,3 @@ -//go:build !race - package localFuncs import ( @@ -265,17 +263,22 @@ func TestESDTSetTransferRoles(t *testing.T) { } func TestESDTSetTransferRolesForwardAsyncCallFailsIntra(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testESDTWithTransferRoleAndForwarder(t, 1) } func TestESDTSetTransferRolesForwardAsyncCallFailsCross(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testESDTWithTransferRoleAndForwarder(t, 2) } func testESDTWithTransferRoleAndForwarder(t *testing.T, numShards int) { - if testing.Short() { - t.Skip("this is not a short test") - } nodes, idxProposers := esdtCommon.CreateNodesAndPrepareBalances(numShards) defer func() { @@ -325,18 +328,22 @@ func testESDTWithTransferRoleAndForwarder(t *testing.T, numShards int) { } func TestAsyncCallsAndCallBacksArgumentsIntra(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testAsyncCallAndCallBacksArguments(t, 1) } func TestAsyncCallsAndCallBacksArgumentsCross(t *testing.T) { - testAsyncCallAndCallBacksArguments(t, 2) -} - -func testAsyncCallAndCallBacksArguments(t *testing.T, numShards int) { if testing.Short() { t.Skip("this is not a short test") } + testAsyncCallAndCallBacksArguments(t, 2) +} + +func testAsyncCallAndCallBacksArguments(t *testing.T, numShards int) { nodes, idxProposers := esdtCommon.CreateNodesAndPrepareBalances(numShards) defer func() { for _, n := range nodes { diff --git a/integrationTests/vm/esdt/multisign/esdtMultisign_test.go b/integrationTests/vm/esdt/multisign/esdtMultisign_test.go index 42b2bcacbdc..fd8e0b6fbb8 100644 --- a/integrationTests/vm/esdt/multisign/esdtMultisign_test.go +++ b/integrationTests/vm/esdt/multisign/esdtMultisign_test.go @@ -1,5 +1,3 @@ -//go:build !race - package multisign import ( @@ -189,7 +187,7 @@ func deployMultisig(t *testing.T, nodes []*integrationTests.TestProcessorNode, o require.Nil(t, err) log.Info("multisign contract", "address", encodedMultisigContractAddress) - integrationTests.CreateAndSendTransaction(nodes[ownerIdx], nodes, big.NewInt(0), emptyAddress, txData, 100000) + integrationTests.CreateAndSendTransaction(nodes[ownerIdx], nodes, big.NewInt(0), emptyAddress, txData, 1000000) return multisigContractAddress } @@ -235,8 +233,8 @@ func proposeIssueTokenAndTransferFunds( params = append(params, tokenPropertiesParams...) 
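// [Illustrative sketch added by the editor, not part of the diff] A recurring change across the test files
// in this diff: the `testing.Short()` guard moves from shared helpers into each exported Test function, so
// `go test -short ./...` skips the heavy scenarios before any node setup runs. The pattern looks like this
// (the test and helper names below are hypothetical):
func TestSomeHeavyScenarioCross(t *testing.T) {
	if testing.Short() {
		t.Skip("this is not a short test")
	}

	testSomeHeavyScenario(t, 2) // unexported helper carrying the actual scenario
}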
txData := strings.Join(params, "@") - integrationTests.CreateAndSendTransaction(nodes[ownerIdx], nodes, big.NewInt(1000000), multisignContractAddress, "deposit", 100000) - integrationTests.CreateAndSendTransaction(nodes[ownerIdx], nodes, big.NewInt(0), multisignContractAddress, txData, 100000) + integrationTests.CreateAndSendTransaction(nodes[ownerIdx], nodes, big.NewInt(1000000), multisignContractAddress, "deposit", 1000000) + integrationTests.CreateAndSendTransaction(nodes[ownerIdx], nodes, big.NewInt(0), multisignContractAddress, txData, 1000000) } func getActionID(t *testing.T, nodes []*integrationTests.TestProcessorNode, multisignContractAddress []byte) []byte { @@ -286,7 +284,7 @@ func boardMembersSignActionID( } txData := strings.Join(params, "@") - integrationTests.CreateAndSendTransaction(node, nodes, big.NewInt(0), multisignContractAddress, txData, 100000) + integrationTests.CreateAndSendTransaction(node, nodes, big.NewInt(0), multisignContractAddress, txData, 1000000) } } @@ -329,5 +327,5 @@ func proposeTransferToken( params := append(multisigParams, esdtParams...) txData := strings.Join(params, "@") - integrationTests.CreateAndSendTransaction(nodes[ownerIdx], nodes, big.NewInt(0), multisignContractAddress, txData, 100000) + integrationTests.CreateAndSendTransaction(nodes[ownerIdx], nodes, big.NewInt(0), multisignContractAddress, txData, 1000000) } diff --git a/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go b/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go index 99138f77ce5..a1db92372bd 100644 --- a/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go +++ b/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go @@ -1,5 +1,3 @@ -//go:build !race - package esdtNFT import ( @@ -908,6 +906,10 @@ func testESDTSemiFungibleTokenTransferRole(t *testing.T, numOfShards int) { } func TestESDTSFTWithEnhancedTransferRole(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + nodesPerShard := 2 numMetachainNodes := 2 numOfShards := 3 diff --git a/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go b/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go index 8f62294a776..534c1c7435e 100644 --- a/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go +++ b/integrationTests/vm/esdt/nft/esdtNFTSCs/esdtNFTSCs_test.go @@ -1,5 +1,3 @@ -//go:build !race - package esdtNFTSCs import ( diff --git a/integrationTests/vm/esdt/process/esdtProcess_test.go b/integrationTests/vm/esdt/process/esdtProcess_test.go index cee94a6132b..8fa9fd04101 100644 --- a/integrationTests/vm/esdt/process/esdtProcess_test.go +++ b/integrationTests/vm/esdt/process/esdtProcess_test.go @@ -1,5 +1,3 @@ -//go:build !race - package process import ( @@ -42,7 +40,6 @@ func TestESDTIssueAndTransactionsOnMultiShardEnvironment(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, @@ -174,7 +171,6 @@ func TestESDTCallBurnOnANonBurnableToken(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: 
integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, @@ -333,6 +329,10 @@ func TestESDTIssueAndSelfTransferShouldNotChangeBalance(t *testing.T) { } func TestESDTIssueFromASmartContractSimulated(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + metaNode := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, NodeShardId: core.MetachainShardId, @@ -375,7 +375,7 @@ func TestESDTIssueFromASmartContractSimulated(t *testing.T) { interimProc, _ := metaNode.InterimProcContainer.Get(block.SmartContractResultBlock) mapCreatedSCRs := interimProc.GetAllCurrentFinishedTxs() - require.Equal(t, len(mapCreatedSCRs), 2) + require.Equal(t, len(mapCreatedSCRs), 3) foundTransfer := false for _, addedSCR := range mapCreatedSCRs { foundTransfer = foundTransfer || strings.Contains(string(addedSCR.GetData()), core.BuiltInFunctionESDTTransfer) @@ -878,133 +878,6 @@ func TestCallbackPaymentEgld(t *testing.T) { }) } -func TestScCallsScWithEsdtCrossShard(t *testing.T) { - t.Skip("test is not ready yet") - - numOfShards := 2 - nodesPerShard := 2 - numMetachainNodes := 2 - - nodes := integrationTests.CreateNodes( - numOfShards, - nodesPerShard, - numMetachainNodes, - ) - - idxProposers := make([]int, numOfShards+1) - for i := 0; i < numOfShards; i++ { - idxProposers[i] = i * nodesPerShard - } - idxProposers[numOfShards] = numOfShards * nodesPerShard - - integrationTests.DisplayAndStartNodes(nodes) - - defer func() { - for _, n := range nodes { - n.Close() - } - }() - - initialVal := big.NewInt(10000000000) - integrationTests.MintAllNodes(nodes, initialVal) - - round := uint64(0) - nonce := uint64(0) - round = integrationTests.IncrementAndPrintRound(round) - nonce++ - - // send token issue - - initialSupply := int64(10000000000) - ticker := "TCK" - esdtCommon.IssueTestToken(nodes, initialSupply, ticker) - tokenIssuer := nodes[0] - - time.Sleep(time.Second) - nrRoundsToPropagateMultiShard := 12 - nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) - time.Sleep(time.Second) - - tokenIdentifier := string(integrationTests.GetTokenIdentifier(nodes, []byte(ticker))) - esdtCommon.CheckAddressHasTokens(t, tokenIssuer.OwnAccount.Address, nodes, []byte(tokenIdentifier), 0, initialSupply) - - // deploy the smart contracts - - vaultCode := wasm.GetSCCode("../testdata/vault.wasm") - secondScAddress, _ := tokenIssuer.BlockchainHook.NewAddress(tokenIssuer.OwnAccount.Address, tokenIssuer.OwnAccount.Nonce, vmFactory.WasmVirtualMachine) - - integrationTests.CreateAndSendTransaction( - nodes[0], - nodes, - big.NewInt(0), - testVm.CreateEmptyAddress(), - wasm.CreateDeployTxData(vaultCode), - integrationTests.AdditionalGasLimit, - ) - - nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, 4, nonce, round, idxProposers) - _, err := nodes[0].AccntState.GetExistingAccount(secondScAddress) - require.Nil(t, err) - - forwarderCode := wasm.GetSCCode("../testdata/forwarder-raw.wasm") - forwarder, _ := nodes[2].BlockchainHook.NewAddress(nodes[2].OwnAccount.Address, nodes[2].OwnAccount.Nonce, vmFactory.WasmVirtualMachine) - integrationTests.CreateAndSendTransaction( - nodes[2], - nodes, - big.NewInt(0), - testVm.CreateEmptyAddress(), - wasm.CreateDeployTxData(forwarderCode), - integrationTests.AdditionalGasLimit, - ) - - nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, 4, nonce, round, idxProposers) - _, err = 
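These integration tests keep selected features switched off for the whole run by setting their activation epochs to integrationTests.UnreachableEpoch. A hedged sketch of the pattern, using only field names that appear in this diff:

    package example

    import (
    	"github.com/multiversx/mx-chain-go/config"
    	"github.com/multiversx/mx-chain-go/integrationTests"
    )

    // featuresDisabledForTest returns an EnableEpochs where the listed features
    // activate at an epoch the test never reaches, i.e. they stay disabled.
    func featuresDisabledForTest() config.EnableEpochs {
    	return config.EnableEpochs{
    		GlobalMintBurnDisableEpoch:                  integrationTests.UnreachableEpoch,
    		OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch,
    		ScheduledMiniBlocksEnableEpoch:              integrationTests.UnreachableEpoch,
    		MiniBlockPartialExecutionEnableEpoch:        integrationTests.UnreachableEpoch,
    	}
    }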
nodes[2].AccntState.GetExistingAccount(forwarder) - require.Nil(t, err) - - txData := txDataBuilder.NewBuilder() - - // call forwarder with esdt, and the forwarder automatically calls second sc - valueToSendToSc := int64(1000) - txData.Clear().TransferESDT(tokenIdentifier, valueToSendToSc) - txData.Str("forward_async_call_half_payment").Bytes(secondScAddress).Str("accept_funds") - integrationTests.CreateAndSendTransaction(tokenIssuer, nodes, big.NewInt(0), forwarder, txData.ToString(), integrationTests.AdditionalGasLimit) - - time.Sleep(time.Second) - nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) - time.Sleep(time.Second) - - esdtCommon.CheckAddressHasTokens(t, tokenIssuer.OwnAccount.Address, nodes, []byte(tokenIdentifier), 0, initialSupply-valueToSendToSc) - esdtCommon.CheckAddressHasTokens(t, forwarder, nodes, []byte(tokenIdentifier), 0, valueToSendToSc/2) - esdtCommon.CheckAddressHasTokens(t, secondScAddress, nodes, []byte(tokenIdentifier), 0, valueToSendToSc/2) - - esdtCommon.CheckNumCallBacks(t, forwarder, nodes, 1) - esdtCommon.CheckForwarderRawSavedCallbackArgs(t, forwarder, nodes, 1, vmcommon.Ok, [][]byte{}) - esdtCommon.CheckForwarderRawSavedCallbackPayments(t, forwarder, nodes, []*esdtCommon.ForwarderRawSavedPaymentInfo{}) - - // call forwarder to ask the second one to send it back some esdt - valueToRequest := valueToSendToSc / 4 - txData.Clear().Func("forward_async_call").Bytes(secondScAddress) - txData.Str("retrieve_funds").Str(tokenIdentifier).Int64(0).Int64(valueToRequest) - integrationTests.CreateAndSendTransaction(tokenIssuer, nodes, big.NewInt(0), forwarder, txData.ToString(), integrationTests.AdditionalGasLimit) - - time.Sleep(time.Second) - _, _ = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) - time.Sleep(time.Second) - - esdtCommon.CheckAddressHasTokens(t, forwarder, nodes, []byte(tokenIdentifier), 0, valueToSendToSc*3/4) - esdtCommon.CheckAddressHasTokens(t, secondScAddress, nodes, []byte(tokenIdentifier), 0, valueToSendToSc/4) - - esdtCommon.CheckNumCallBacks(t, forwarder, nodes, 2) - esdtCommon.CheckForwarderRawSavedCallbackArgs(t, forwarder, nodes, 2, vmcommon.Ok, [][]byte{}) - esdtCommon.CheckForwarderRawSavedCallbackPayments(t, forwarder, nodes, []*esdtCommon.ForwarderRawSavedPaymentInfo{ - { - TokenId: "EGLD", - Nonce: 0, - Payment: big.NewInt(valueToSendToSc), - }, - }) -} - func TestScCallsScWithEsdtIntraShard_SecondScRefusesPayment(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -1410,12 +1283,14 @@ func TestExecOnDestWithTokenTransferFromScAtoScBWithIntermediaryExecOnDest_NotEn enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, FailExecutionOnEveryAPIErrorEnableEpoch: integrationTests.UnreachableEpoch, } arwenVersion := config.WasmVMVersionByEpoch{Version: "v1.4"} - vmConfig := &config.VirtualMachineConfig{WasmVMVersions: []config.WasmVMVersionByEpoch{arwenVersion}} + vmConfig := &config.VirtualMachineConfig{ + WasmVMVersions: []config.WasmVMVersionByEpoch{arwenVersion}, + TransferAndExecuteByUserAddresses: []string{"erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe3"}, + } nodes := integrationTests.CreateNodesWithEnableEpochsAndVmConfig( numOfShards, nodesPerShard, @@ -2106,7 +1981,6 @@ func 
TestIssueAndBurnESDT_MaxGasPerBlockExceeded(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, MaxBlockchainHookCountersEnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/vm/esdt/roles/esdtRoles_test.go b/integrationTests/vm/esdt/roles/esdtRoles_test.go index aa2834062c4..5c117ed4edd 100644 --- a/integrationTests/vm/esdt/roles/esdtRoles_test.go +++ b/integrationTests/vm/esdt/roles/esdtRoles_test.go @@ -1,5 +1,3 @@ -//go:build !race - package roles import ( diff --git a/integrationTests/vm/mockVM/vmDeploy/vmDeploy_test.go b/integrationTests/vm/mockVM/vmDeploy/vmDeploy_test.go index 4390a3eff47..1a53d3ce4e9 100644 --- a/integrationTests/vm/mockVM/vmDeploy/vmDeploy_test.go +++ b/integrationTests/vm/mockVM/vmDeploy/vmDeploy_test.go @@ -15,6 +15,10 @@ import ( ) func TestVmDeployWithoutTransferShouldDeploySCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -70,6 +74,10 @@ func TestVmDeployWithoutTransferShouldDeploySCCode(t *testing.T) { } func TestVmDeployWithTransferShouldDeploySCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -124,6 +132,10 @@ func TestVmDeployWithTransferShouldDeploySCCode(t *testing.T) { } func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -181,6 +193,10 @@ func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { } func TestVMDeployWithTransferWithInsufficientGasShouldReturnErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1000) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) diff --git a/integrationTests/vm/mockVM/vmGet/vmGet_test.go b/integrationTests/vm/mockVM/vmGet/vmGet_test.go index bd818df6884..5083c44a276 100644 --- a/integrationTests/vm/mockVM/vmGet/vmGet_test.go +++ b/integrationTests/vm/mockVM/vmGet/vmGet_test.go @@ -29,6 +29,10 @@ import ( ) func TestVmGetShouldReturnValue(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + accnts, destinationAddressBytes, expectedValueForVar := deploySmartContract(t) mockVM := vm.CreateOneSCExecutorMockVM(accnts) diff --git a/integrationTests/vm/mockVM/vmRunContract/vmRunContract_test.go b/integrationTests/vm/mockVM/vmRunContract/vmRunContract_test.go index 00f8ef20610..af7d0e33e47 100644 --- a/integrationTests/vm/mockVM/vmRunContract/vmRunContract_test.go +++ b/integrationTests/vm/mockVM/vmRunContract/vmRunContract_test.go @@ -19,6 +19,10 @@ import ( // TODO add integration and unit tests with generating and broadcasting transaction with empty recv address func TestRunSCWithoutTransferShouldRunSCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -89,6 +93,10 @@ func TestRunSCWithoutTransferShouldRunSCCode(t *testing.T) { } func 
TestRunSCWithTransferShouldRunSCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -160,6 +168,10 @@ func TestRunSCWithTransferShouldRunSCCode(t *testing.T) { } func TestRunWithTransferAndGasShouldRunSCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) @@ -231,6 +243,10 @@ func TestRunWithTransferAndGasShouldRunSCCode(t *testing.T) { } func TestRunWithTransferWithInsufficientGasShouldReturnErr(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + vmOpGas := uint64(1) senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(11) diff --git a/integrationTests/vm/staking/baseTestMetaProcessor.go b/integrationTests/vm/staking/baseTestMetaProcessor.go new file mode 100644 index 00000000000..0ae2b5ed2d8 --- /dev/null +++ b/integrationTests/vm/staking/baseTestMetaProcessor.go @@ -0,0 +1,394 @@ +package staking + +import ( + "fmt" + "math/big" + "strconv" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/process" + vmFactory "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + arwenConfig "github.com/multiversx/mx-chain-vm-v1_4-go/config" + "github.com/stretchr/testify/require" +) + +const ( + stakingV4Step1EnableEpoch = 1 + stakingV4Step2EnableEpoch = 2 + stakingV4Step3EnableEpoch = 3 + addressLength = 15 + nodePrice = 1000 +) + +func haveTime() bool { return true } +func noTime() bool { return false } + +type nodesConfig struct { + eligible map[uint32][][]byte + waiting map[uint32][][]byte + leaving map[uint32][][]byte + shuffledOut map[uint32][][]byte + queue [][]byte + auction [][]byte + new [][]byte +} + +// TestMetaProcessor - +type TestMetaProcessor struct { + MetaBlockProcessor process.BlockProcessor + NodesCoordinator 
nodesCoordinator.NodesCoordinator + ValidatorStatistics process.ValidatorStatisticsProcessor + EpochStartTrigger integrationTests.TestEpochStartTrigger + BlockChainHandler data.ChainHandler + NodesConfig nodesConfig + AccountsAdapter state.AccountsAdapter + Marshaller marshal.Marshalizer + TxCacher dataRetriever.TransactionCacher + TxCoordinator process.TransactionCoordinator + SystemVM vmcommon.VMExecutionHandler + BlockChainHook process.BlockChainHookHandler + StakingDataProvider epochStart.StakingDataProvider + + currentRound uint64 +} + +func newTestMetaProcessor( + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + bootstrapComponents factory.BootstrapComponentsHolder, + statusComponents factory.StatusComponentsHolder, + stateComponents factory.StateComponentsHandler, + nc nodesCoordinator.NodesCoordinator, + maxNodesConfig []config.MaxNodesChangeConfig, + queue [][]byte, +) *TestMetaProcessor { + saveNodesConfig( + stateComponents.AccountsAdapter(), + coreComponents.InternalMarshalizer(), + nc, + maxNodesConfig, + ) + + stakingcommon.SaveDelegationManagerConfig( + stateComponents.AccountsAdapter(), + coreComponents.InternalMarshalizer(), + ) + + gasScheduleNotifier := createGasScheduleNotifier() + argsBlockChainHook, blockChainHook := createBlockChainHook( + dataComponents, + coreComponents, + stateComponents.AccountsAdapter(), + bootstrapComponents.ShardCoordinator(), + gasScheduleNotifier, + ) + + metaVmFactory := createVMContainerFactory( + coreComponents, + gasScheduleNotifier, + blockChainHook, + argsBlockChainHook, + stateComponents, + bootstrapComponents.ShardCoordinator(), + nc, + maxNodesConfig[0].MaxNumNodes, + ) + vmContainer, _ := metaVmFactory.Create() + systemVM, _ := vmContainer.Get(vmFactory.SystemVirtualMachine) + + validatorStatisticsProcessor := createValidatorStatisticsProcessor( + dataComponents, + coreComponents, + nc, + bootstrapComponents.ShardCoordinator(), + stateComponents.PeerAccounts(), + ) + stakingDataProvider := createStakingDataProvider( + coreComponents.EnableEpochsHandler(), + systemVM, + ) + scp := createSystemSCProcessor( + nc, + coreComponents, + stateComponents, + bootstrapComponents.ShardCoordinator(), + maxNodesConfig, + validatorStatisticsProcessor, + systemVM, + stakingDataProvider, + ) + + txCoordinator := &testscommon.TransactionCoordinatorMock{} + epochStartTrigger := createEpochStartTrigger(coreComponents, dataComponents.StorageService()) + + eligible, _ := nc.GetAllEligibleValidatorsPublicKeys(0) + waiting, _ := nc.GetAllWaitingValidatorsPublicKeys(0) + shuffledOut, _ := nc.GetAllShuffledOutValidatorsPublicKeys(0) + + return &TestMetaProcessor{ + AccountsAdapter: stateComponents.AccountsAdapter(), + Marshaller: coreComponents.InternalMarshalizer(), + NodesConfig: nodesConfig{ + eligible: eligible, + waiting: waiting, + shuffledOut: shuffledOut, + queue: queue, + auction: make([][]byte, 0), + }, + MetaBlockProcessor: createMetaBlockProcessor( + nc, + scp, + coreComponents, + dataComponents, + bootstrapComponents, + statusComponents, + stateComponents, + validatorStatisticsProcessor, + blockChainHook, + metaVmFactory, + epochStartTrigger, + vmContainer, + txCoordinator, + ), + currentRound: 1, + NodesCoordinator: nc, + ValidatorStatistics: validatorStatisticsProcessor, + EpochStartTrigger: epochStartTrigger, + BlockChainHandler: dataComponents.Blockchain(), + TxCacher: dataComponents.Datapool().CurrentBlockTxs(), + TxCoordinator: txCoordinator, + SystemVM: systemVM, + BlockChainHook: 
blockChainHook, + StakingDataProvider: stakingDataProvider, + } +} + +func saveNodesConfig( + accountsDB state.AccountsAdapter, + marshaller marshal.Marshalizer, + nc nodesCoordinator.NodesCoordinator, + maxNodesConfig []config.MaxNodesChangeConfig, +) { + eligibleMap, _ := nc.GetAllEligibleValidatorsPublicKeys(0) + waitingMap, _ := nc.GetAllWaitingValidatorsPublicKeys(0) + allStakedNodes := int64(len(getAllPubKeys(eligibleMap)) + len(getAllPubKeys(waitingMap))) + + maxNumNodes := allStakedNodes + if len(maxNodesConfig) > 0 { + maxNumNodes = int64(maxNodesConfig[0].MaxNumNodes) + } + + stakingcommon.SaveNodesConfig( + accountsDB, + marshaller, + allStakedNodes, + 1, + maxNumNodes, + ) +} + +func createGasScheduleNotifier() core.GasScheduleNotifier { + gasSchedule := arwenConfig.MakeGasMapForTests() + defaults.FillGasMapInternal(gasSchedule, 1) + return testscommon.NewGasScheduleNotifierMock(gasSchedule) +} + +func createEpochStartTrigger( + coreComponents factory.CoreComponentsHolder, + storageService dataRetriever.StorageService, +) integrationTests.TestEpochStartTrigger { + argsEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 10, + RoundsPerEpoch: 10, + }, + Epoch: 0, + EpochStartNotifier: coreComponents.EpochStartNotifierWithConfirm(), + Storage: storageService, + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + DataPool: dataRetrieverMock.NewPoolsHolderMock(), + } + + epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsEpochStart) + testTrigger := &metachain.TestTrigger{} + testTrigger.SetTrigger(epochStartTrigger) + + return testTrigger +} + +// Process - +func (tmp *TestMetaProcessor) Process(t *testing.T, numOfRounds uint64) { + for r := tmp.currentRound; r < tmp.currentRound+numOfRounds; r++ { + header := tmp.createNewHeader(t, r) + tmp.createAndCommitBlock(t, header, haveTime) + } + + tmp.currentRound += numOfRounds +} + +func (tmp *TestMetaProcessor) createNewHeader(t *testing.T, round uint64) *block.MetaBlock { + _, err := tmp.MetaBlockProcessor.CreateNewHeader(round, round) + require.Nil(t, err) + + epoch := tmp.EpochStartTrigger.Epoch() + printNewHeaderRoundEpoch(round, epoch) + + currentHeader, currentHash := tmp.getCurrentHeaderInfo() + header := createMetaBlockToCommit( + epoch, + round, + currentHash, + currentHeader.GetRandSeed(), + tmp.NodesCoordinator.ConsensusGroupSize(core.MetachainShardId), + ) + + return header +} + +func (tmp *TestMetaProcessor) createAndCommitBlock(t *testing.T, header data.HeaderHandler, haveTime func() bool) { + newHeader, blockBody, err := tmp.MetaBlockProcessor.CreateBlock(header, haveTime) + require.Nil(t, err) + + err = tmp.MetaBlockProcessor.CommitBlock(newHeader, blockBody) + require.Nil(t, err) + + time.Sleep(time.Millisecond * 50) + tmp.updateNodesConfig(header.GetEpoch()) + tmp.displayConfig(tmp.NodesConfig) +} + +func printNewHeaderRoundEpoch(round uint64, epoch uint32) { + headline := display.Headline( + fmt.Sprintf("Commiting header in epoch %v round %v", epoch, round), + "", + delimiter, + ) + fmt.Println(headline) +} + +func (tmp *TestMetaProcessor) getCurrentHeaderInfo() (data.HeaderHandler, []byte) { + currentHeader := tmp.BlockChainHandler.GetCurrentBlockHeader() + currentHash := tmp.BlockChainHandler.GetCurrentBlockHeaderHash() + if currentHeader == nil { + currentHeader = tmp.BlockChainHandler.GetGenesisHeader() + currentHash = 
tmp.BlockChainHandler.GetGenesisHeaderHash() + } + + return currentHeader, currentHash +} + +func createMetaBlockToCommit( + epoch uint32, + round uint64, + prevHash []byte, + prevRandSeed []byte, + consensusSize int, +) *block.MetaBlock { + roundStr := strconv.Itoa(int(round)) + hdr := block.MetaBlock{ + Epoch: epoch, + Nonce: round, + Round: round, + PrevHash: prevHash, + Signature: []byte("signature"), + PubKeysBitmap: []byte(strings.Repeat("f", consensusSize)), + RootHash: []byte("roothash" + roundStr), + ShardInfo: make([]block.ShardData, 0), + TxCount: 1, + PrevRandSeed: prevRandSeed, + RandSeed: []byte("randseed" + roundStr), + AccumulatedFeesInEpoch: big.NewInt(0), + AccumulatedFees: big.NewInt(0), + DevFeesInEpoch: big.NewInt(0), + DeveloperFees: big.NewInt(0), + } + + shardMiniBlockHeaders := make([]block.MiniBlockHeader, 0) + shardMiniBlockHeader := block.MiniBlockHeader{ + Hash: []byte("mb_hash" + roundStr), + ReceiverShardID: 0, + SenderShardID: 0, + TxCount: 1, + } + shardMiniBlockHeaders = append(shardMiniBlockHeaders, shardMiniBlockHeader) + shardData := block.ShardData{ + Nonce: round, + ShardID: 0, + HeaderHash: []byte("hdr_hash" + roundStr), + TxCount: 1, + ShardMiniBlockHeaders: shardMiniBlockHeaders, + DeveloperFees: big.NewInt(0), + AccumulatedFees: big.NewInt(0), + } + hdr.ShardInfo = append(hdr.ShardInfo, shardData) + + return &hdr +} + +func (tmp *TestMetaProcessor) updateNodesConfig(epoch uint32) { + eligible, _ := tmp.NodesCoordinator.GetAllEligibleValidatorsPublicKeys(epoch) + waiting, _ := tmp.NodesCoordinator.GetAllWaitingValidatorsPublicKeys(epoch) + leaving, _ := tmp.NodesCoordinator.GetAllLeavingValidatorsPublicKeys(epoch) + shuffledOut, _ := tmp.NodesCoordinator.GetAllShuffledOutValidatorsPublicKeys(epoch) + + rootHash, _ := tmp.ValidatorStatistics.RootHash() + validatorsInfoMap, _ := tmp.ValidatorStatistics.GetValidatorInfoForRootHash(rootHash) + + auction := make([][]byte, 0) + newList := make([][]byte, 0) + for _, validator := range validatorsInfoMap.GetAllValidatorsInfo() { + if validator.GetList() == string(common.AuctionList) { + auction = append(auction, validator.GetPublicKey()) + } + if validator.GetList() == string(common.NewList) { + newList = append(newList, validator.GetPublicKey()) + } + } + + tmp.NodesConfig.eligible = eligible + tmp.NodesConfig.waiting = waiting + tmp.NodesConfig.shuffledOut = shuffledOut + tmp.NodesConfig.leaving = leaving + tmp.NodesConfig.auction = auction + tmp.NodesConfig.new = newList + tmp.NodesConfig.queue = tmp.getWaitingListKeys() +} + +func generateAddresses(startIdx, n uint32) [][]byte { + ret := make([][]byte, 0, n) + + for i := startIdx; i < n+startIdx; i++ { + ret = append(ret, generateAddress(i)) + } + + return ret +} + +func generateAddress(identifier uint32) []byte { + uniqueIdentifier := fmt.Sprintf("address-%d", identifier) + return []byte(strings.Repeat("0", addressLength-len(uniqueIdentifier)) + uniqueIdentifier) +} diff --git a/integrationTests/vm/staking/componentsHolderCreator.go b/integrationTests/vm/staking/componentsHolderCreator.go new file mode 100644 index 00000000000..e3673b08ec7 --- /dev/null +++ b/integrationTests/vm/staking/componentsHolderCreator.go @@ -0,0 +1,221 @@ +package staking + +import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/nodetype" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + 
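Process is the harness's main driver: each call builds and commits numOfRounds meta blocks and then refreshes the eligible/waiting/leaving/shuffled-out/auction lists in NodesConfig. A hedged usage sketch; the constructor is injected as a parameter rather than assuming a particular exported constructor name:

    package staking

    import "testing"

    // Sketch only: advance the chain by a couple of epochs and query the resulting
    // eligible set. RoundsPerEpoch is 10 in createEpochStartTrigger, so 25 rounds
    // crosses at least two epoch boundaries.
    func driveHarnessSketch(t *testing.T, newProcessor func(t *testing.T) *TestMetaProcessor) {
    	tmp := newProcessor(t) // hypothetical constructor supplied by the caller

    	tmp.Process(t, 25)

    	epoch := tmp.EpochStartTrigger.Epoch()
    	eligible, err := tmp.NodesCoordinator.GetAllEligibleValidatorsPublicKeys(epoch)
    	if err != nil {
    		t.Fatal(err)
    	}
    	_ = eligible
    }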
"github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/typeConverters/uint64ByteSlice" + "github.com/multiversx/mx-chain-core-go/hashing/sha256" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/enablers" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/integrationTests" + integrationMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + mockFactory "github.com/multiversx/mx-chain-go/node/mock/factory" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + stateFactory "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/storagePruningManager" + "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" + "github.com/multiversx/mx-chain-go/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + notifierMocks "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + factoryTests "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" + "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + stateTests "github.com/multiversx/mx-chain-go/testscommon/state" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/trie" +) + +const hashSize = 32 + +func createComponentHolders(numOfShards uint32) ( + factory.CoreComponentsHolder, + factory.DataComponentsHolder, + factory.BootstrapComponentsHolder, + factory.StatusComponentsHolder, + factory.StateComponentsHandler, +) { + coreComponents := createCoreComponents() + statusComponents := createStatusComponents() + stateComponents := createStateComponents(coreComponents) + dataComponents := createDataComponents(coreComponents, numOfShards) + bootstrapComponents := createBootstrapComponents(coreComponents.InternalMarshalizer(), numOfShards) + + return coreComponents, dataComponents, bootstrapComponents, statusComponents, stateComponents +} + +func createCoreComponents() factory.CoreComponentsHolder { + epochNotifier := forking.NewGenericEpochNotifier() + configEnableEpochs := config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + 
StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + StakingV4Step3EnableEpoch: stakingV4Step3EnableEpoch, + GovernanceEnableEpoch: integrationTests.UnreachableEpoch, + RefactorPeersMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + } + + enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(configEnableEpochs, epochNotifier) + + return &integrationMocks.CoreComponentsStub{ + InternalMarshalizerField: &marshal.GogoProtoMarshalizer{}, + HasherField: sha256.NewSha256(), + Uint64ByteSliceConverterField: uint64ByteSlice.NewBigEndianConverter(), + StatusHandlerField: statusHandler.NewStatusMetrics(), + RoundHandlerField: &mock.RoundHandlerMock{RoundTimeDuration: time.Second}, + EpochStartNotifierWithConfirmField: notifier.NewEpochStartSubscriptionHandler(), + EpochNotifierField: epochNotifier, + RaterField: &testscommon.RaterMock{Chance: 5}, + AddressPubKeyConverterField: testscommon.NewPubkeyConverterMock(addressLength), + EconomicsDataField: stakingcommon.CreateEconomicsData(), + ChanStopNodeProcessField: endProcess.GetDummyEndProcessChannel(), + NodeTypeProviderField: nodetype.NewNodeTypeProvider(core.NodeTypeValidator), + ProcessStatusHandlerInternal: statusHandler.NewProcessStatusHandler(), + EnableEpochsHandlerField: enableEpochsHandler, + EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, + RoundNotifierField: ¬ifierMocks.RoundNotifierStub{}, + } +} + +func createDataComponents(coreComponents factory.CoreComponentsHolder, numOfShards uint32) factory.DataComponentsHolder { + genesisBlock := createGenesisMetaBlock() + genesisBlockHash, _ := coreComponents.InternalMarshalizer().Marshal(genesisBlock) + genesisBlockHash = coreComponents.Hasher().Compute(string(genesisBlockHash)) + + blockChain, _ := blockchain.NewMetaChain(&statusHandlerMock.AppStatusHandlerStub{}) + _ = blockChain.SetGenesisHeader(createGenesisMetaBlock()) + blockChain.SetGenesisHeaderHash(genesisBlockHash) + + chainStorer := dataRetriever.NewChainStorer() + chainStorer.AddStorer(dataRetriever.BootstrapUnit, integrationTests.CreateMemUnit()) + chainStorer.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, integrationTests.CreateMemUnit()) + chainStorer.AddStorer(dataRetriever.MetaBlockUnit, integrationTests.CreateMemUnit()) + chainStorer.AddStorer(dataRetriever.MiniBlockUnit, integrationTests.CreateMemUnit()) + chainStorer.AddStorer(dataRetriever.BlockHeaderUnit, integrationTests.CreateMemUnit()) + for i := uint32(0); i < numOfShards; i++ { + unit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + chainStorer.AddStorer(unit, integrationTests.CreateMemUnit()) + } + + return &mockFactory.DataComponentsMock{ + Store: chainStorer, + DataPool: dataRetrieverMock.NewPoolsHolderMock(), + BlockChain: blockChain, + EconomicsData: coreComponents.EconomicsData(), + } +} + +func createBootstrapComponents( + marshaller marshal.Marshalizer, + numOfShards uint32, +) factory.BootstrapComponentsHolder { + shardCoordinator, _ := sharding.NewMultiShardCoordinator(numOfShards, core.MetachainShardId) + ncr, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + marshaller, + stakingV4Step2EnableEpoch, + ) + + return &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: shardCoordinator, + HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + VersionedHdrFactory: &testscommon.VersionedHeaderFactoryStub{ + CreateCalled: func(epoch uint32) data.HeaderHandler { + return &block.MetaBlock{Epoch: epoch} + }, + }, + NodesCoordinatorRegistryFactoryField: ncr, + } +} + +func 
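createDataComponents derives the genesis header hash by marshalling the header and hashing the marshalled bytes. A standard-library sketch of that marshal-then-hash idiom; the real components use the gogo-proto marshalizer and the core sha256 hasher, not encoding/json:

    package example

    import (
    	"crypto/sha256"
    	"encoding/json"
    )

    type header struct {
    	Nonce uint64
    	Round uint64
    }

    // hashOf marshals a structure and hashes the marshalled bytes, mirroring the
    // order used when the genesis header hash is computed above.
    func hashOf(h header) ([]byte, error) {
    	raw, err := json.Marshal(h)
    	if err != nil {
    		return nil, err
    	}
    	sum := sha256.Sum256(raw)
    	return sum[:], nil
    }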
createStatusComponents() factory.StatusComponentsHolder { + return &integrationMocks.StatusComponentsStub{ + Outport: &outport.OutportStub{}, + SoftwareVersionCheck: &integrationMocks.SoftwareVersionCheckerMock{}, + ManagedPeersMonitorField: &testscommon.ManagedPeersMonitorStub{}, + } +} + +func createStateComponents(coreComponents factory.CoreComponentsHolder) factory.StateComponentsHandler { + tsmArgs := getNewTrieStorageManagerArgs(coreComponents) + tsm, _ := trie.CreateTrieStorageManager(tsmArgs, trie.StorageManagerOptions{}) + trieFactoryManager, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) + + argsAccCreator := stateFactory.ArgsAccountCreator{ + Hasher: coreComponents.Hasher(), + Marshaller: coreComponents.InternalMarshalizer(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + + accCreator, _ := stateFactory.NewAccountCreator(argsAccCreator) + + userAccountsDB := createAccountsDB(coreComponents, accCreator, trieFactoryManager) + peerAccountsDB := createAccountsDB(coreComponents, stateFactory.NewPeerAccountCreator(), trieFactoryManager) + + _ = userAccountsDB.SetSyncer(&mock.AccountsDBSyncerStub{}) + _ = peerAccountsDB.SetSyncer(&mock.AccountsDBSyncerStub{}) + + return &factoryTests.StateComponentsMock{ + PeersAcc: peerAccountsDB, + Accounts: userAccountsDB, + } +} + +func getNewTrieStorageManagerArgs(coreComponents factory.CoreComponentsHolder) trie.NewTrieStorageManagerArgs { + return trie.NewTrieStorageManagerArgs{ + MainStorer: testscommon.CreateMemUnit(), + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, + IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: "id", + StatsCollector: disabled.NewStateStatistics(), + } +} + +func createAccountsDB( + coreComponents factory.CoreComponentsHolder, + accountFactory state.AccountFactory, + trieStorageManager common.StorageManager, +) *state.AccountsDB { + tr, _ := trie.NewTrie( + trieStorageManager, + coreComponents.InternalMarshalizer(), + coreComponents.Hasher(), + coreComponents.EnableEpochsHandler(), + 5, + ) + + argsEvictionWaitingList := evictionWaitingList.MemoryEvictionWaitingListArgs{ + RootHashesSize: 10, + HashesSize: hashSize, + } + ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(argsEvictionWaitingList) + spm, _ := storagePruningManager.NewStoragePruningManager(ewl, 10) + argsAccountsDb := state.ArgsAccountsDB{ + Trie: tr, + Hasher: coreComponents.Hasher(), + Marshaller: coreComponents.InternalMarshalizer(), + AccountFactory: accountFactory, + StoragePruningManager: spm, + AddressConverter: coreComponents.AddressPubKeyConverter(), + SnapshotsManager: &stateTests.SnapshotsManagerStub{}, + } + adb, _ := state.NewAccountsDB(argsAccountsDb) + return adb +} diff --git a/integrationTests/vm/staking/configDisplayer.go b/integrationTests/vm/staking/configDisplayer.go new file mode 100644 index 00000000000..3ea2a402f7f --- /dev/null +++ b/integrationTests/vm/staking/configDisplayer.go @@ -0,0 +1,132 @@ +package staking + +import ( + "bytes" + "fmt" + "sort" + "strconv" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/state" +) + +const ( + delimiter = "#" + maxPubKeysListLen = 6 +) + +// TODO: Make a subcomponent which will register to epoch notifier to display config only upon epoch change + +func getAllPubKeys(validatorsMap 
map[uint32][][]byte) [][]byte { + allValidators := make([][]byte, 0) + for _, validatorsInShard := range validatorsMap { + allValidators = append(allValidators, validatorsInShard...) + } + + return allValidators +} + +func getShortPubKeysList(pubKeys [][]byte) [][]byte { + pubKeysToDisplay := pubKeys + sort.SliceStable(pubKeysToDisplay, func(i, j int) bool { + return string(pubKeysToDisplay[i]) < string(pubKeysToDisplay[j]) + }) + + if len(pubKeys) > maxPubKeysListLen { + pubKeysToDisplay = make([][]byte, 0) + pubKeysToDisplay = append(pubKeysToDisplay, pubKeys[:maxPubKeysListLen/2]...) + pubKeysToDisplay = append(pubKeysToDisplay, [][]byte{[]byte("...")}...) + pubKeysToDisplay = append(pubKeysToDisplay, pubKeys[len(pubKeys)-maxPubKeysListLen/2:]...) + } + + return pubKeysToDisplay +} + +func (tmp *TestMetaProcessor) getAllNodeKeys() state.ShardValidatorsInfoMapHandler { + rootHash, _ := tmp.ValidatorStatistics.RootHash() + validatorsMap, _ := tmp.ValidatorStatistics.GetValidatorInfoForRootHash(rootHash) + return validatorsMap +} + +func (tmp *TestMetaProcessor) displayConfig(config nodesConfig) { + lines := make([]*display.LineData, 0) + + allNodes := tmp.getAllNodeKeys() + _ = tmp.StakingDataProvider.PrepareStakingData(allNodes) + + numShards := uint32(len(config.eligible)) + for shardId := uint32(0); shardId < numShards; shardId++ { + shard := getShardId(shardId, numShards) + + lines = append(lines, tmp.getDisplayableValidatorsInShard("eligible", config.eligible[shard], shard)...) + lines = append(lines, tmp.getDisplayableValidatorsInShard("waiting", config.waiting[shard], shard)...) + lines = append(lines, tmp.getDisplayableValidatorsInShard("leaving", config.leaving[shard], shard)...) + lines = append(lines, tmp.getDisplayableValidatorsInShard("shuffled", config.shuffledOut[shard], shard)...) 
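getShortPubKeysList keeps the printed tables readable by sorting the keys and, past maxPubKeysListLen entries, showing only the first and last halves around a literal "..." row. A standard-library sketch of the same truncation on plain strings:

    package example

    import "sort"

    // shortList sorts the input and, when it is longer than max, keeps the first
    // and last max/2 entries around a "..." marker, mirroring getShortPubKeysList.
    func shortList(items []string, max int) []string {
    	sorted := append([]string(nil), items...)
    	sort.Strings(sorted)

    	if len(sorted) <= max {
    		return sorted
    	}

    	out := make([]string, 0, max+1)
    	out = append(out, sorted[:max/2]...)
    	out = append(out, "...")
    	out = append(out, sorted[len(sorted)-max/2:]...)
    	return out
    }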
+ lines = append(lines, display.NewLineData(true, []string{})) + } + lines = append(lines, display.NewLineData(true, []string{"eligible", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.eligible))), "", "", "All shards"})) + lines = append(lines, display.NewLineData(true, []string{"waiting", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.waiting))), "", "", "All shards"})) + lines = append(lines, display.NewLineData(true, []string{"leaving", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.leaving))), "", "", "All shards"})) + lines = append(lines, display.NewLineData(true, []string{"shuffled", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.shuffledOut))), "", "", "All shards"})) + + tableHeader := []string{"List", "BLS key", "Owner", "TopUp", "Shard ID"} + table, _ := display.CreateTableString(tableHeader, lines) + headline := display.Headline("Nodes config", "", delimiter) + fmt.Printf("%s\n%s\n", headline, table) + + tmp.displayValidators("New", config.new) + tmp.displayValidators("Auction", config.auction) + tmp.displayValidators("Queue", config.queue) + + tmp.StakingDataProvider.Clean() +} + +func getShardId(shardId, numShards uint32) uint32 { + if shardId == numShards-1 { + return core.MetachainShardId + } + + return shardId +} + +func (tmp *TestMetaProcessor) getDisplayableValidatorsInShard(list string, pubKeys [][]byte, shardID uint32) []*display.LineData { + pubKeysToDisplay := getShortPubKeysList(pubKeys) + + lines := make([]*display.LineData, 0) + for idx, pk := range pubKeysToDisplay { + horizontalLineAfter := idx == len(pubKeysToDisplay)-1 + owner, _ := tmp.StakingDataProvider.GetBlsKeyOwner(pk) + topUp, _ := tmp.StakingDataProvider.GetNodeStakedTopUp(pk) + if bytes.Equal(pk, []byte("...")) { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), "...", "...", strconv.Itoa(int(shardID))})) + } else { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), owner, topUp.String(), strconv.Itoa(int(shardID))})) + } + } + lines = append(lines, display.NewLineData(true, []string{list, fmt.Sprintf("Total: %d", len(pubKeys)), "", "", strconv.Itoa(int(shardID))})) + + return lines +} + +func (tmp *TestMetaProcessor) displayValidators(list string, pubKeys [][]byte) { + pubKeysToDisplay := getShortPubKeysList(pubKeys) + + lines := make([]*display.LineData, 0) + tableHeader := []string{"List", "BLS key", "Owner", "TopUp"} + for idx, pk := range pubKeysToDisplay { + horizontalLineAfter := idx == len(pubKeysToDisplay)-1 + owner, _ := tmp.StakingDataProvider.GetBlsKeyOwner(pk) + topUp, _ := tmp.StakingDataProvider.GetNodeStakedTopUp(pk) + if bytes.Equal(pk, []byte("...")) { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), "...", "..."})) + } else { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), owner, topUp.String()})) + } + } + lines = append(lines, display.NewLineData(true, []string{list, fmt.Sprintf("Total: %d", len(pubKeys))})) + + headline := display.Headline(fmt.Sprintf("%s list", list), "", delimiter) + table, _ := display.CreateTableString(tableHeader, lines) + fmt.Printf("%s \n%s\n", headline, table) +} diff --git a/integrationTests/vm/staking/metaBlockProcessorCreator.go b/integrationTests/vm/staking/metaBlockProcessorCreator.go new file mode 100644 index 00000000000..759458cf30e --- /dev/null +++ b/integrationTests/vm/staking/metaBlockProcessorCreator.go @@ -0,0 +1,245 @@ +package staking + +import ( + "math/big" 
+ + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/factory" + integrationMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" + blproc "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/postprocess" + "github.com/multiversx/mx-chain-go/process/block/processedMb" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/process/scToProtocol" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/dblookupext" + factory2 "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/integrationtests" + "github.com/multiversx/mx-chain-go/testscommon/outport" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func createMetaBlockProcessor( + nc nodesCoordinator.NodesCoordinator, + systemSCProcessor process.EpochStartSystemSCProcessor, + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + bootstrapComponents factory.BootstrapComponentsHolder, + statusComponents factory.StatusComponentsHolder, + stateComponents factory.StateComponentsHandler, + validatorsInfoCreator process.ValidatorStatisticsProcessor, + blockChainHook process.BlockChainHookHandler, + metaVMFactory process.VirtualMachinesContainerFactory, + epochStartHandler process.EpochStartTriggerHandler, + vmContainer process.VirtualMachinesContainer, + txCoordinator process.TransactionCoordinator, +) process.BlockProcessor { + blockTracker := createBlockTracker( + dataComponents.Blockchain().GetGenesisHeader(), + bootstrapComponents.ShardCoordinator(), + ) + epochStartDataCreator := createEpochStartDataCreator( + coreComponents, + dataComponents, + bootstrapComponents.ShardCoordinator(), + epochStartHandler, + blockTracker, + ) + + accountsDb := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) + accountsDb[state.UserAccountsState] = stateComponents.AccountsAdapter() + accountsDb[state.PeerAccountsState] = stateComponents.PeerAccounts() + + bootStrapStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + bootStorer, _ := bootstrapStorage.NewBootstrapStorer( + coreComponents.InternalMarshalizer(), + bootStrapStorer, + ) + + headerValidator := createHeaderValidator(coreComponents) + valInfoCreator := createValidatorInfoCreator(coreComponents, dataComponents, bootstrapComponents.ShardCoordinator()) + stakingToPeer := 
createSCToProtocol(coreComponents, stateComponents, dataComponents.Datapool().CurrentBlockTxs()) + + args := blproc.ArgMetaProcessor{ + ArgBaseProcessor: blproc.ArgBaseProcessor{ + CoreComponents: coreComponents, + DataComponents: dataComponents, + BootstrapComponents: bootstrapComponents, + StatusComponents: statusComponents, + StatusCoreComponents: &factory2.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, + }, + AccountsDB: accountsDb, + ForkDetector: &integrationMocks.ForkDetectorStub{}, + NodesCoordinator: nc, + FeeHandler: postprocess.NewFeeAccumulator(), + RequestHandler: &testscommon.RequestHandlerStub{}, + BlockChainHook: blockChainHook, + TxCoordinator: txCoordinator, + EpochStartTrigger: epochStartHandler, + HeaderValidator: headerValidator, + BootStorer: bootStorer, + BlockTracker: blockTracker, + BlockSizeThrottler: &mock.BlockSizeThrottlerStub{}, + HistoryRepository: &dblookupext.HistoryRepositoryStub{}, + VMContainersFactory: metaVMFactory, + VmContainer: vmContainer, + GasHandler: &mock.GasHandlerMock{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 10000, + ProcessedMiniBlocksTracker: processedMb.NewProcessedMiniBlocksTracker(), + OutportDataProvider: &outport.OutportDataProviderStub{}, + ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, + ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, + }, + SCToProtocol: stakingToPeer, + PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, + EpochStartDataCreator: epochStartDataCreator, + EpochEconomics: &mock.EpochEconomicsStub{}, + EpochRewardsCreator: &testscommon.RewardsCreatorStub{ + GetLocalTxCacheCalled: func() epochStart.TransactionCacher { + return dataComponents.Datapool().CurrentBlockTxs() + }, + }, + EpochValidatorInfoCreator: valInfoCreator, + ValidatorStatisticsProcessor: validatorsInfoCreator, + EpochSystemSCProcessor: systemSCProcessor, + } + + metaProc, _ := blproc.NewMetaProcessor(args) + return metaProc +} + +func createValidatorInfoCreator( + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + shardCoordinator sharding.Coordinator, +) process.EpochStartValidatorInfoCreator { + mbStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.MiniBlockUnit) + + args := metachain.ArgsNewValidatorInfoCreator{ + ShardCoordinator: shardCoordinator, + MiniBlockStorage: mbStorer, + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + DataPool: dataComponents.Datapool(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + ValidatorInfoStorage: integrationtests.CreateMemUnit(), + } + + valInfoCreator, _ := metachain.NewValidatorInfoCreator(args) + return valInfoCreator +} + +func createEpochStartDataCreator( + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + shardCoordinator sharding.Coordinator, + epochStartTrigger process.EpochStartTriggerHandler, + blockTracker process.BlockTracker, +) process.EpochStartDataCreator { + argsEpochStartDataCreator := metachain.ArgsNewEpochStartData{ + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + Store: dataComponents.StorageService(), + DataPool: dataComponents.Datapool(), + BlockTracker: blockTracker, + ShardCoordinator: shardCoordinator, + 
EpochStartTrigger: epochStartTrigger, + RequestHandler: &testscommon.RequestHandlerStub{}, + GenesisEpoch: 0, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + epochStartDataCreator, _ := metachain.NewEpochStartData(argsEpochStartDataCreator) + return epochStartDataCreator +} + +func createBlockTracker( + genesisMetaHeader data.HeaderHandler, + shardCoordinator sharding.Coordinator, +) process.BlockTracker { + genesisBlocks := make(map[uint32]data.HeaderHandler) + for ShardID := uint32(0); ShardID < shardCoordinator.NumberOfShards(); ShardID++ { + genesisBlocks[ShardID] = createGenesisBlock(ShardID) + } + + genesisBlocks[core.MetachainShardId] = genesisMetaHeader + return mock.NewBlockTrackerMock(shardCoordinator, genesisBlocks) +} + +func createGenesisBlock(shardID uint32) *block.Header { + rootHash := []byte("roothash") + return &block.Header{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + ShardID: shardID, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + AccumulatedFees: big.NewInt(0), + DeveloperFees: big.NewInt(0), + } +} + +func createGenesisMetaBlock() *block.MetaBlock { + rootHash := []byte("roothash") + return &block.MetaBlock{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + AccumulatedFees: big.NewInt(0), + DeveloperFees: big.NewInt(0), + AccumulatedFeesInEpoch: big.NewInt(0), + DevFeesInEpoch: big.NewInt(0), + } +} + +func createHeaderValidator(coreComponents factory.CoreComponentsHolder) epochStart.HeaderValidator { + argsHeaderValidator := blproc.ArgsHeaderValidator{ + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + } + headerValidator, _ := blproc.NewHeaderValidator(argsHeaderValidator) + return headerValidator +} + +func createSCToProtocol( + coreComponents factory.CoreComponentsHolder, + stateComponents factory.StateComponentsHandler, + txCacher dataRetriever.TransactionCacher, +) process.SmartContractToProtocolHandler { + args := scToProtocol.ArgStakingToPeer{ + PubkeyConv: coreComponents.AddressPubKeyConverter(), + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + PeerState: stateComponents.PeerAccounts(), + BaseState: stateComponents.AccountsAdapter(), + ArgParser: smartContract.NewArgumentParser(), + CurrTxs: txCacher, + RatingsData: &mock.RatingsInfoMock{}, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + stakingToPeer, _ := scToProtocol.NewStakingToPeer(args) + return stakingToPeer +} diff --git a/integrationTests/vm/staking/nodesCoordiantorCreator.go b/integrationTests/vm/staking/nodesCoordiantorCreator.go new file mode 100644 index 00000000000..27a54719521 --- /dev/null +++ b/integrationTests/vm/staking/nodesCoordiantorCreator.go @@ -0,0 +1,232 @@ +package staking + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" + "github.com/multiversx/mx-chain-go/factory" + integrationMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + 
"github.com/multiversx/mx-chain-go/state/accounts" + "github.com/multiversx/mx-chain-go/storage" + nodesSetupMock "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/multiversx/mx-chain-storage-go/lrucache" +) + +const ( + shuffleBetweenShards = false + adaptivity = false + hysteresis = float32(0.2) + initialRating = 5 +) + +func createNodesCoordinator( + eligibleMap map[uint32][]nodesCoordinator.Validator, + waitingMap map[uint32][]nodesCoordinator.Validator, + numOfMetaNodes uint32, + numOfShards uint32, + numOfEligibleNodesPerShard uint32, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + coreComponents factory.CoreComponentsHolder, + bootStorer storage.Storer, + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory, + maxNodesConfig []config.MaxNodesChangeConfig, +) nodesCoordinator.NodesCoordinator { + shufflerArgs := &nodesCoordinator.NodesShufflerArgs{ + NodesShard: numOfEligibleNodesPerShard, + NodesMeta: numOfMetaNodes, + Hysteresis: hysteresis, + Adaptivity: adaptivity, + ShuffleBetweenShards: shuffleBetweenShards, + MaxNodesEnableConfig: maxNodesConfig, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + StakingV4Step3EnableEpoch: stakingV4Step3EnableEpoch, + }, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(shufflerArgs) + cache, _ := lrucache.NewCache(10000) + argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + ShardIDAsObserver: core.MetachainShardId, + NbShards: numOfShards, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: eligibleMap[core.MetachainShardId][0].PubKey(), + ConsensusGroupCache: cache, + ShuffledOutHandler: &integrationMocks.ShuffledOutHandlerStub{}, + ChanStopNode: coreComponents.ChanStopNodeProcess(), + IsFullArchive: false, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + EpochStartNotifier: coreComponents.EpochStartNotifierWithConfirm(), + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, + NodeTypeProvider: coreComponents.NodeTypeProvider(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), + GenesisNodesSetupHandler: &nodesSetupMock.NodesSetupStub{}, + } + + baseNodesCoordinator, _ := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) + nodesCoord, _ := nodesCoordinator.NewIndexHashedNodesCoordinatorWithRater(baseNodesCoordinator, coreComponents.Rater()) + return nodesCoord +} + +func createGenesisNodes( + numOfMetaNodes uint32, + numOfShards uint32, + numOfNodesPerShard uint32, + numOfWaitingNodesPerShard uint32, + marshaller marshal.Marshalizer, + stateComponents factory.StateComponentsHandler, +) (map[uint32][]nodesCoordinator.Validator, map[uint32][]nodesCoordinator.Validator) { + addressStartIdx := uint32(0) + eligibleGenesisNodes := generateGenesisNodeInfoMap(numOfMetaNodes, numOfShards, numOfNodesPerShard, addressStartIdx) + eligibleValidators, _ := nodesCoordinator.NodesInfoToValidators(eligibleGenesisNodes) + + addressStartIdx = numOfMetaNodes + 
numOfShards*numOfNodesPerShard + waitingGenesisNodes := generateGenesisNodeInfoMap(numOfWaitingNodesPerShard, numOfShards, numOfWaitingNodesPerShard, addressStartIdx) + waitingValidators, _ := nodesCoordinator.NodesInfoToValidators(waitingGenesisNodes) + + registerValidators(eligibleValidators, stateComponents, marshaller, common.EligibleList) + registerValidators(waitingValidators, stateComponents, marshaller, common.WaitingList) + + return eligibleValidators, waitingValidators +} + +func createGenesisNodesWithCustomConfig( + owners map[string]*OwnerStats, + marshaller marshal.Marshalizer, + stateComponents factory.StateComponentsHandler, +) (map[uint32][]nodesCoordinator.Validator, map[uint32][]nodesCoordinator.Validator) { + eligibleGenesis := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + waitingGenesis := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + + for owner, ownerStats := range owners { + registerOwnerKeys( + []byte(owner), + ownerStats.EligibleBlsKeys, + ownerStats.TotalStake, + stateComponents, + marshaller, + common.EligibleList, + eligibleGenesis, + ) + + registerOwnerKeys( + []byte(owner), + ownerStats.WaitingBlsKeys, + ownerStats.TotalStake, + stateComponents, + marshaller, + common.WaitingList, + waitingGenesis, + ) + } + + eligible, _ := nodesCoordinator.NodesInfoToValidators(eligibleGenesis) + waiting, _ := nodesCoordinator.NodesInfoToValidators(waitingGenesis) + + return eligible, waiting +} + +func generateGenesisNodeInfoMap( + numOfMetaNodes uint32, + numOfShards uint32, + numOfNodesPerShard uint32, + addressStartIdx uint32, +) map[uint32][]nodesCoordinator.GenesisNodeInfoHandler { + validatorsMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + id := addressStartIdx + for shardId := uint32(0); shardId < numOfShards; shardId++ { + for n := uint32(0); n < numOfNodesPerShard; n++ { + addr := generateAddress(id) + validator := integrationMocks.NewNodeInfo(addr, addr, shardId, initialRating) + validatorsMap[shardId] = append(validatorsMap[shardId], validator) + id++ + } + } + + for n := uint32(0); n < numOfMetaNodes; n++ { + addr := generateAddress(id) + validator := integrationMocks.NewNodeInfo(addr, addr, core.MetachainShardId, initialRating) + validatorsMap[core.MetachainShardId] = append(validatorsMap[core.MetachainShardId], validator) + id++ + } + + return validatorsMap +} + +func registerOwnerKeys( + owner []byte, + ownerPubKeys map[uint32][][]byte, + totalStake *big.Int, + stateComponents factory.StateComponentsHolder, + marshaller marshal.Marshalizer, + list common.PeerType, + allNodes map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, +) { + for shardID, pubKeysInShard := range ownerPubKeys { + for _, pubKey := range pubKeysInShard { + validator := integrationMocks.NewNodeInfo(pubKey, pubKey, shardID, initialRating) + allNodes[shardID] = append(allNodes[shardID], validator) + + savePeerAcc(stateComponents, pubKey, shardID, list) + } + stakingcommon.RegisterValidatorKeys( + stateComponents.AccountsAdapter(), + owner, + owner, + pubKeysInShard, + totalStake, + marshaller, + ) + } +} + +func registerValidators( + validators map[uint32][]nodesCoordinator.Validator, + stateComponents factory.StateComponentsHolder, + marshaller marshal.Marshalizer, + list common.PeerType, +) { + for shardID, validatorsInShard := range validators { + for idx, val := range validatorsInShard { + pubKey := val.PubKey() + savePeerAcc(stateComponents, pubKey, shardID, list) + + stakingcommon.RegisterValidatorKeys( + 
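createGenesisNodes above avoids address collisions between the eligible and waiting sets by starting the waiting identifiers right after the last eligible one. A small worked example of that offset (the numbers are illustrative only):

    package example

    // With 2 metachain nodes, 2 shards and 2 eligible nodes per shard, the eligible
    // set consumes identifiers 0..5, so the waiting set starts at 2 + 2*2 = 6.
    func waitingStartIdx(numOfMetaNodes, numOfShards, numOfNodesPerShard uint32) uint32 {
    	return numOfMetaNodes + numOfShards*numOfNodesPerShard
    }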
stateComponents.AccountsAdapter(), + pubKey, + pubKey, + [][]byte{pubKey}, + big.NewInt(nodePrice+int64(idx)), + marshaller, + ) + } + } +} + +func savePeerAcc( + stateComponents factory.StateComponentsHolder, + pubKey []byte, + shardID uint32, + list common.PeerType, +) { + peerAccount, _ := accounts.NewPeerAccount(pubKey) + peerAccount.SetTempRating(initialRating) + peerAccount.ShardId = shardID + peerAccount.BLSPublicKey = pubKey + peerAccount.List = string(list) + _ = stateComponents.PeerAccounts().SaveAccount(peerAccount) +} diff --git a/integrationTests/vm/staking/stakingQueue.go b/integrationTests/vm/staking/stakingQueue.go new file mode 100644 index 00000000000..7544e18cf40 --- /dev/null +++ b/integrationTests/vm/staking/stakingQueue.go @@ -0,0 +1,121 @@ +package staking + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" +) + +func createStakingQueue( + numOfNodesInStakingQueue uint32, + totalNumOfNodes uint32, + marshaller marshal.Marshalizer, + accountsAdapter state.AccountsAdapter, +) [][]byte { + ownerWaitingNodes := make([][]byte, 0) + if numOfNodesInStakingQueue == 0 { + return ownerWaitingNodes + } + + owner := generateAddress(totalNumOfNodes) + totalNumOfNodes += 1 + for i := totalNumOfNodes; i < totalNumOfNodes+numOfNodesInStakingQueue; i++ { + ownerWaitingNodes = append(ownerWaitingNodes, generateAddress(i)) + } + + stakingcommon.AddKeysToWaitingList( + accountsAdapter, + ownerWaitingNodes, + marshaller, + owner, + owner, + ) + + stakingcommon.RegisterValidatorKeys( + accountsAdapter, + owner, + owner, + ownerWaitingNodes, + big.NewInt(int64(2*nodePrice*numOfNodesInStakingQueue)), + marshaller, + ) + + return ownerWaitingNodes +} + +func createStakingQueueCustomNodes( + owners map[string]*OwnerStats, + marshaller marshal.Marshalizer, + accountsAdapter state.AccountsAdapter, +) [][]byte { + queue := make([][]byte, 0) + + for owner, ownerStats := range owners { + stakingcommon.RegisterValidatorKeys( + accountsAdapter, + []byte(owner), + []byte(owner), + ownerStats.StakingQueueKeys, + ownerStats.TotalStake, + marshaller, + ) + + stakingcommon.AddKeysToWaitingList( + accountsAdapter, + ownerStats.StakingQueueKeys, + marshaller, + []byte(owner), + []byte(owner), + ) + + queue = append(queue, ownerStats.StakingQueueKeys...) 
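+		// each owner's queued keys are appended to the flat result so the tests can later compare or re-stake them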
+ } + + return queue +} + +func (tmp *TestMetaProcessor) getWaitingListKeys() [][]byte { + stakingSCAcc := stakingcommon.LoadUserAccount(tmp.AccountsAdapter, vm.StakingSCAddress) + + waitingList := &systemSmartContracts.WaitingList{ + FirstKey: make([]byte, 0), + LastKey: make([]byte, 0), + Length: 0, + LastJailedKey: make([]byte, 0), + } + marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) + if len(marshaledData) == 0 { + return nil + } + + err := tmp.Marshaller.Unmarshal(waitingList, marshaledData) + if err != nil { + return nil + } + + index := uint32(1) + nextKey := make([]byte, len(waitingList.FirstKey)) + copy(nextKey, waitingList.FirstKey) + + allPubKeys := make([][]byte, 0) + for len(nextKey) != 0 && index <= waitingList.Length { + allPubKeys = append(allPubKeys, nextKey[2:]) // remove "w_" prefix + + element, errGet := stakingcommon.GetWaitingListElement(stakingSCAcc, tmp.Marshaller, nextKey) + if errGet != nil { + return nil + } + + nextKey = make([]byte, len(element.NextKey)) + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + return allPubKeys +} diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go new file mode 100644 index 00000000000..077c87c407b --- /dev/null +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -0,0 +1,1791 @@ +package staking + +import ( + "bytes" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" + "github.com/stretchr/testify/require" +) + +func requireSliceContains(t *testing.T, s1, s2 [][]byte) { + for _, elemInS2 := range s2 { + require.Contains(t, s1, elemInS2) + } +} + +func requireSliceContainsNumOfElements(t *testing.T, s1, s2 [][]byte, numOfElements int) { + foundCt := 0 + for _, elemInS2 := range s2 { + if searchInSlice(s1, elemInS2) { + foundCt++ + } + } + + require.Equal(t, numOfElements, foundCt) +} + +func requireSameSliceDifferentOrder(t *testing.T, s1, s2 [][]byte) { + require.Equal(t, len(s1), len(s2)) + + for _, elemInS1 := range s1 { + require.Contains(t, s2, elemInS1) + } +} + +func searchInSlice(s1 [][]byte, s2 []byte) bool { + for _, elemInS1 := range s1 { + if bytes.Equal(elemInS1, s2) { + return true + } + } + + return false +} + +func searchInMap(validatorMap map[uint32][][]byte, pk []byte) bool { + for _, validatorsInShard := range validatorMap { + for _, val := range validatorsInShard { + if bytes.Equal(val, pk) { + return true + } + } + } + return false +} + +func requireMapContains(t *testing.T, m map[uint32][][]byte, s [][]byte) { + for _, elemInSlice := range s { + require.True(t, searchInMap(m, elemInSlice)) + } +} + +func requireMapDoesNotContain(t *testing.T, m map[uint32][][]byte, s [][]byte) { + for _, elemInSlice := range s { + require.False(t, searchInMap(m, elemInSlice)) + } +} + +// remove will remove the item from slice without keeping the order of the original slice +func remove(slice [][]byte, elem []byte) [][]byte { + ret := slice + for i, e := range slice { + if bytes.Equal(elem, e) { + ret[i] = ret[len(slice)-1] + return 
ret[:len(slice)-1] + } + } + + return ret +} + +func getIntersection(slice1, slice2 [][]byte) [][]byte { + ret := make([][]byte, 0) + for _, value := range slice2 { + if searchInSlice(slice1, value) { + copiedVal := make([]byte, len(value)) + copy(copiedVal, value) + ret = append(ret, copiedVal) + } + } + + return ret +} + +func getAllPubKeysFromConfig(nodesCfg nodesConfig) [][]byte { + allPubKeys := getAllPubKeys(nodesCfg.eligible) + allPubKeys = append(allPubKeys, getAllPubKeys(nodesCfg.waiting)...) + allPubKeys = append(allPubKeys, getAllPubKeys(nodesCfg.leaving)...) + allPubKeys = append(allPubKeys, getAllPubKeys(nodesCfg.shuffledOut)...) + allPubKeys = append(allPubKeys, nodesCfg.queue...) + allPubKeys = append(allPubKeys, nodesCfg.auction...) + allPubKeys = append(allPubKeys, nodesCfg.new...) + + return allPubKeys +} + +func unStake(t *testing.T, owner []byte, accountsDB state.AccountsAdapter, marshaller marshal.Marshalizer, stake *big.Int) { + validatorSC := stakingcommon.LoadUserAccount(accountsDB, vm.ValidatorSCAddress) + ownerStoredData, _, err := validatorSC.RetrieveValue(owner) + require.Nil(t, err) + + validatorData := &systemSmartContracts.ValidatorDataV2{} + err = marshaller.Unmarshal(validatorData, ownerStoredData) + require.Nil(t, err) + + validatorData.TotalStakeValue.Sub(validatorData.TotalStakeValue, stake) + marshaledData, _ := marshaller.Marshal(validatorData) + err = validatorSC.SaveKeyValue(owner, marshaledData) + require.Nil(t, err) + + err = accountsDB.SaveAccount(validatorSC) + require.Nil(t, err) + _, err = accountsDB.Commit() + require.Nil(t, err) +} + +type configNum struct { + eligible map[uint32]int + waiting map[uint32]int + leaving map[uint32]int + shuffledOut map[uint32]int + queue int + auction int + new int +} + +func checkConfig(t *testing.T, expectedConfig *configNum, nodesConfig nodesConfig) { + checkNumNodes(t, expectedConfig.eligible, nodesConfig.eligible) + checkNumNodes(t, expectedConfig.waiting, nodesConfig.waiting) + checkNumNodes(t, expectedConfig.leaving, nodesConfig.leaving) + checkNumNodes(t, expectedConfig.shuffledOut, nodesConfig.shuffledOut) + + require.Equal(t, expectedConfig.queue, len(nodesConfig.queue)) + require.Equal(t, expectedConfig.auction, len(nodesConfig.auction)) + require.Equal(t, expectedConfig.new, len(nodesConfig.new)) +} + +func checkNumNodes(t *testing.T, expectedNumNodes map[uint32]int, actualNodes map[uint32][][]byte) { + for shardID, numNodesInShard := range expectedNumNodes { + require.Equal(t, numNodesInShard, len(actualNodes[shardID])) + } +} + +func checkShuffledOutNodes(t *testing.T, currNodesConfig, prevNodesConfig nodesConfig, numShuffledOutNodes int, numRemainingEligible int) { + // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numShuffledOutNodes) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), numRemainingEligible) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), numShuffledOutNodes) +} + +func checkStakingV4EpochChangeFlow( + t *testing.T, + currNodesConfig, prevNodesConfig nodesConfig, + numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction int) { + + // Nodes which are now in eligible are from previous waiting list + 
requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numOfShuffledOut) + + // New auction list also contains unselected nodes from previous auction list + requireSliceContainsNumOfElements(t, currNodesConfig.auction, prevNodesConfig.auction, numOfUnselectedNodesFromAuction) + + // All shuffled out are from previous eligible config + requireMapContains(t, prevNodesConfig.eligible, getAllPubKeys(currNodesConfig.shuffledOut)) + + // All shuffled out are now in auction + requireSliceContains(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + + // Nodes which have been selected from previous auction list are now in waiting + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction, numOfSelectedNodesFromAuction) +} + +func getAllOwnerNodesMap(nodeGroups ...[][]byte) map[string][][]byte { + ret := make(map[string][][]byte) + + for _, nodes := range nodeGroups { + addNodesToMap(nodes, ret) + } + + return ret +} + +func addNodesToMap(nodes [][]byte, allOwnerNodes map[string][][]byte) { + for _, node := range nodes { + allOwnerNodes[string(node)] = [][]byte{node} + } +} + +func TestStakingV4(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfMetaNodes := uint32(400) + numOfShards := uint32(3) + numOfEligibleNodesPerShard := uint32(400) + numOfWaitingNodesPerShard := uint32(400) + numOfNodesToShufflePerShard := uint32(80) + shardConsensusGroupSize := 266 + metaConsensusGroupSize := 266 + numOfNodesInStakingQueue := uint32(60) + + totalEligible := int(numOfEligibleNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + totalWaiting := int(numOfWaitingNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + + node := NewTestMetaProcessor( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + numOfNodesInStakingQueue, + ) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + initialNodes := node.NodesConfig + require.Len(t, getAllPubKeys(initialNodes.eligible), totalEligible) + require.Len(t, getAllPubKeys(initialNodes.waiting), totalWaiting) + require.Len(t, initialNodes.queue, int(numOfNodesInStakingQueue)) + require.Empty(t, initialNodes.shuffledOut) + require.Empty(t, initialNodes.auction) + + // 2. Check config after staking v4 initialization + node.Process(t, 5) + nodesConfigStakingV4Step1 := node.NodesConfig + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.eligible), totalEligible) + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.waiting), totalWaiting) + require.Empty(t, nodesConfigStakingV4Step1.queue) + require.Empty(t, nodesConfigStakingV4Step1.shuffledOut) + require.Empty(t, nodesConfigStakingV4Step1.auction) // the queue should be empty + + // 3. re-stake the node nodes that were in the queue + node.ProcessReStake(t, initialNodes.queue) + nodesConfigStakingV4Step1 = node.NodesConfig + requireSameSliceDifferentOrder(t, initialNodes.queue, nodesConfigStakingV4Step1.auction) + + // 4. 
Check config after first staking v4 epoch, WITHOUT distribution from auction -> waiting + node.Process(t, 6) + nodesConfigStakingV4Step2 := node.NodesConfig + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.eligible), totalEligible) // 1600 + + numOfShuffledOut := int((numOfShards + 1) * numOfNodesToShufflePerShard) // 320 + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.shuffledOut), numOfShuffledOut) + + newWaiting := totalWaiting - numOfShuffledOut // 1280 (1600 - 320) + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.waiting), newWaiting) + + // 380 (320 from shuffled out + 60 from initial staking queue -> auction from stakingV4 init) + auctionListSize := numOfShuffledOut + len(nodesConfigStakingV4Step1.auction) + require.Len(t, nodesConfigStakingV4Step2.auction, auctionListSize) + requireSliceContains(t, nodesConfigStakingV4Step2.auction, nodesConfigStakingV4Step1.auction) + + require.Empty(t, nodesConfigStakingV4Step2.queue) + require.Empty(t, nodesConfigStakingV4Step2.leaving) + + // 320 nodes which are now in eligible are from previous waiting list + requireSliceContainsNumOfElements(t, getAllPubKeys(nodesConfigStakingV4Step2.eligible), getAllPubKeys(nodesConfigStakingV4Step1.waiting), numOfShuffledOut) + + // All shuffled out are from previous staking v4 init eligible + requireMapContains(t, nodesConfigStakingV4Step1.eligible, getAllPubKeys(nodesConfigStakingV4Step2.shuffledOut)) + + // All shuffled out are in auction + requireSliceContains(t, nodesConfigStakingV4Step2.auction, getAllPubKeys(nodesConfigStakingV4Step2.shuffledOut)) + + // No auction node from previous epoch has been moved to waiting + requireMapDoesNotContain(t, nodesConfigStakingV4Step2.waiting, nodesConfigStakingV4Step1.auction) + + epochs := 0 + prevConfig := nodesConfigStakingV4Step2 + numOfSelectedNodesFromAuction := numOfShuffledOut // 320, since we will always fill shuffled out nodes with this config + numOfUnselectedNodesFromAuction := auctionListSize - numOfShuffledOut // 60 = 380 - 320 + for epochs < 10 { + node.Process(t, 5) + newNodeConfig := node.NodesConfig + + require.Len(t, getAllPubKeys(newNodeConfig.eligible), totalEligible) // 1600 + require.Len(t, getAllPubKeys(newNodeConfig.waiting), newWaiting) // 1280 + require.Len(t, getAllPubKeys(newNodeConfig.shuffledOut), numOfShuffledOut) // 320 + require.Len(t, newNodeConfig.auction, auctionListSize) // 380 + require.Empty(t, newNodeConfig.queue) + require.Empty(t, newNodeConfig.leaving) + + checkStakingV4EpochChangeFlow(t, newNodeConfig, prevConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + prevConfig = newNodeConfig + epochs++ + } +} + +func TestStakingV4MetaProcessor_ProcessMultipleNodesWithSameSetupExpectSameRootHash(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfMetaNodes := uint32(6) + numOfShards := uint32(3) + numOfEligibleNodesPerShard := uint32(6) + numOfWaitingNodesPerShard := uint32(6) + numOfNodesToShufflePerShard := uint32(2) + shardConsensusGroupSize := 2 + metaConsensusGroupSize := 2 + numOfNodesInStakingQueue := uint32(2) + + nodes := make([]*TestMetaProcessor, 0, numOfMetaNodes) + for i := uint32(0); i < numOfMetaNodes; i++ { + nodes = append(nodes, NewTestMetaProcessor( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + numOfNodesInStakingQueue, + )) + nodes[i].EpochStartTrigger.SetRoundsPerEpoch(4) + } + 
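+	// Every processor shares the same setup, so the validator statistics root hash collected for a given epoch is expected to be identical across all of them (compared after the loop).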
+ numOfEpochs := uint32(15) + rootHashes := make(map[uint32][][]byte) + for currEpoch := uint32(1); currEpoch <= numOfEpochs; currEpoch++ { + for _, node := range nodes { + rootHash, _ := node.ValidatorStatistics.RootHash() + rootHashes[currEpoch] = append(rootHashes[currEpoch], rootHash) + + node.Process(t, 5) + require.Equal(t, currEpoch, node.EpochStartTrigger.Epoch()) + } + } + + for _, rootHashesInEpoch := range rootHashes { + firstNodeRootHashInEpoch := rootHashesInEpoch[0] + for _, rootHash := range rootHashesInEpoch { + require.Equal(t, firstNodeRootHashInEpoch, rootHash) + } + } +} + +func TestStakingV4_UnStakeNodesWithNotEnoughFunds(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + // Owner1 has 8 nodes, but enough stake for just 7 nodes. At the end of the epoch(staking v4 init), + // his last node from staking queue should be unStaked + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:3], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[3:6], + }, + StakingQueueKeys: pubKeys[6:8], + TotalStake: big.NewInt(7 * nodePrice), + } + + // Owner2 has 6 nodes, but enough stake for just 5 nodes. At the end of the epoch(staking v4 init), + // one node from waiting list should be unStaked + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[8:11], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[11:14], + }, + TotalStake: big.NewInt(5 * nodePrice), + } + + // Owner3 has 2 nodes in staking queue with topUp = nodePrice + owner3 := "owner3" + owner3Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[14:16], + TotalStake: big.NewInt(3 * nodePrice), + } + + // Owner4 has 1 node in staking queue with topUp = nodePrice + owner4 := "owner4" + owner4Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[16:17], + TotalStake: big.NewInt(2 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 3, + MinNumberOfEligibleMetaNodes: 3, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + owner3: owner3Stats, + owner4: owner4Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 3) + + requireSliceContainsNumOfElements(t, currNodesConfig.eligible[core.MetachainShardId], owner1Stats.EligibleBlsKeys[core.MetachainShardId], 3) + requireSliceContainsNumOfElements(t, currNodesConfig.waiting[core.MetachainShardId], owner2Stats.WaitingBlsKeys[core.MetachainShardId], 3) + requireSliceContainsNumOfElements(t, currNodesConfig.eligible[0], owner2Stats.EligibleBlsKeys[0], 3) + requireSliceContainsNumOfElements(t, currNodesConfig.waiting[0], owner1Stats.WaitingBlsKeys[0], 3) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner3StakingQueue := owner3Stats.StakingQueueKeys + owner4StakingQueue := owner4Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner3StakingQueue...) + queue = append(queue, owner4StakingQueue...) + require.Len(t, currNodesConfig.queue, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // 2. Check config after staking v4 initialization + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 5) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 3) + + // Owner1 will have the second node from queue removed, before adding all the nodes to auction list + queue = remove(queue, owner1StakingQueue[1]) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.auction) // all nodes from the queue should be unStaked and the auction list should be empty + + // Owner2 will have one of the nodes in waiting list removed + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.leaving), getAllPubKeys(owner2Stats.WaitingBlsKeys), 1) + + // Owner1 will unStake some EGLD => at the end of next epoch, he should not be able to reStake all the nodes + unStake(t, []byte(owner1), node.AccountsAdapter, node.Marshaller, big.NewInt(0.1*nodePrice)) + + // 3. ReStake the nodes that were in the queue + queue = remove(queue, owner1StakingQueue[0]) + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.auction, 3) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // 4. 
Check config in epoch = staking v4 + node.Process(t, 4) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 3) + require.Len(t, getAllPubKeys(currNodesConfig.shuffledOut), 2) + + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 1) + require.Len(t, currNodesConfig.shuffledOut[core.MetachainShardId], 1) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Len(t, currNodesConfig.shuffledOut[0], 1) + + require.Len(t, currNodesConfig.auction, 3) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 0) + // There are no more unStaked nodes left from owner1 because of insufficient funds + requireSliceContainsNumOfElements(t, getAllPubKeysFromConfig(currNodesConfig), owner1StakingQueue, 0) + + // Owner3 will unStake EGLD => he will have negative top-up at the selection time => one of his nodes will be unStaked. + // His other node should not have been selected => remains in auction. + // Meanwhile, owner4 had never unStaked EGLD => his node from auction list will be distributed to waiting + unStake(t, []byte(owner3), node.AccountsAdapter, node.Marshaller, big.NewInt(2*nodePrice)) + + // 5. Check config in epoch = staking v4 step3 + node.Process(t, 5) + currNodesConfig = node.NodesConfig + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.leaving), owner3StakingQueue, 1) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, owner3StakingQueue, 1) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), owner4StakingQueue, 1) +} + +func TestStakingV4_StakeNewNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + // Owner1 has 6 nodes, zero top up + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:2], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[2:4], + }, + StakingQueueKeys: pubKeys[4:6], + TotalStake: big.NewInt(6 * nodePrice), + } + + // Owner2 has 4 nodes, zero top up + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[6:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:10], + }, + TotalStake: big.NewInt(4 * nodePrice), + } + // Owner3 has 1 node in staking queue with topUp = nodePrice + owner3 := "owner3" + owner3Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[10:11], + TotalStake: big.NewInt(2 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 1, + ShardConsensusGroupSize: 1, + MinNumberOfEligibleShardNodes: 1, + MinNumberOfEligibleMetaNodes: 1, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + owner3: owner3Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 8, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1.1 Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2) + 
require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.eligible[0], 2) + require.Len(t, currNodesConfig.waiting[0], 2) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner3StakingQueue := owner3Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner3StakingQueue...) + require.Len(t, currNodesConfig.queue, 3) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // NewOwner0 stakes 1 node with top up = 0 before staking v4; should be sent to staking queue + newOwner0 := "newOwner0" + newNodes0 := map[string]*NodesRegisterData{ + newOwner0: { + BLSKeys: [][]byte{generateAddress(333)}, + TotalStake: big.NewInt(nodePrice), + }, + } + + // 1.2 Check staked node before staking v4 is sent to staking queue + node.ProcessStake(t, newNodes0) + queue = append(queue, newNodes0[newOwner0].BLSKeys...) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.queue, 4) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + // NewOwner1 stakes 1 node with top up = 2*node price; should be sent to auction list + newOwner1 := "newOwner1" + newNodes1 := map[string]*NodesRegisterData{ + newOwner1: { + BLSKeys: [][]byte{generateAddress(444)}, + TotalStake: big.NewInt(3 * nodePrice), + }, + } + // 2. Check config after staking v4 init when a new node is staked + node.Process(t, 4) + node.ProcessStake(t, newNodes1) + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig + queue = append(queue, newNodes1[newOwner1].BLSKeys...) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.leaving) + require.Len(t, currNodesConfig.auction, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // NewOwner2 stakes 2 node with top up = 2*node price; should be sent to auction list + newOwner2 := "newOwner2" + newNodes2 := map[string]*NodesRegisterData{ + newOwner2: { + BLSKeys: [][]byte{generateAddress(555), generateAddress(666)}, + TotalStake: big.NewInt(4 * nodePrice), + }, + } + // 2. Check in epoch = staking v4 step2 when 2 new nodes are staked + node.Process(t, 4) + node.ProcessStake(t, newNodes2) + currNodesConfig = node.NodesConfig + queue = append(queue, newNodes2[newOwner2].BLSKeys...) + require.Empty(t, currNodesConfig.queue) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, queue, 7) + + // 3. Epoch = staking v4 step3 + // Only the new 2 owners + owner3 had enough top up to be distributed to waiting. 
+ // Meanwhile, owner1 which had 0 top up, still has his bls keys in auction, along with newOwner0 + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Empty(t, currNodesConfig.queue) + requireMapContains(t, currNodesConfig.waiting, newNodes1[newOwner1].BLSKeys) + requireMapContains(t, currNodesConfig.waiting, newNodes2[newOwner2].BLSKeys) + requireMapContains(t, currNodesConfig.waiting, owner3StakingQueue) + requireSliceContains(t, currNodesConfig.auction, owner1StakingQueue) + requireSliceContains(t, currNodesConfig.auction, newNodes0[newOwner0].BLSKeys) +} + +func TestStakingV4_UnStakeNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:2], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[2:4], + }, + StakingQueueKeys: pubKeys[4:6], + TotalStake: big.NewInt(10 * nodePrice), + } + + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[6:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:12], + }, + StakingQueueKeys: pubKeys[12:15], + TotalStake: big.NewInt(10 * nodePrice), + } + + owner3 := "owner3" + owner3Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[15:17], + TotalStake: big.NewInt(6 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 1, + ShardConsensusGroupSize: 1, + MinNumberOfEligibleShardNodes: 2, + MinNumberOfEligibleMetaNodes: 2, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + owner3: owner3Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 2) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner2StakingQueue := owner2Stats.StakingQueueKeys + owner3StakingQueue := owner3Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner2StakingQueue...) + queue = append(queue, owner3StakingQueue...) + require.Len(t, currNodesConfig.queue, 7) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + // 1.1 Owner2 unStakes one of his staking queue nodes. Node should be removed from staking queue list + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2Stats.StakingQueueKeys[0]}, + }) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner2Stats.StakingQueueKeys[0]) + require.Len(t, currNodesConfig.queue, 6) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + require.Empty(t, currNodesConfig.new) + require.Empty(t, currNodesConfig.auction) + + // 1.2 Owner2 unStakes one of his waiting list keys. First node from staking queue should be added to fill its place. 
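+	// The unStaked waiting key itself is expected to show up as a leaving node only at the next epoch start (checked at step 2 below).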
+ copy(queue, currNodesConfig.queue) // copy queue to local variable so we have the queue in same order + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2Stats.WaitingBlsKeys[core.MetachainShardId][0]}, + }) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.new, 1) + requireSliceContains(t, queue, currNodesConfig.new) + require.Empty(t, currNodesConfig.auction) + queue = remove(queue, currNodesConfig.new[0]) + require.Len(t, currNodesConfig.queue, 5) + requireSameSliceDifferentOrder(t, queue, currNodesConfig.queue) + + // 2. Check config after staking v4 step1 + node.Process(t, 3) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + // Owner2's node from waiting list which was unStaked in previous epoch is now leaving + require.Len(t, currNodesConfig.leaving, 1) + require.Equal(t, owner2Stats.WaitingBlsKeys[core.MetachainShardId][0], currNodesConfig.leaving[core.MetachainShardId][0]) + require.Empty(t, currNodesConfig.auction) // all nodes from queue have been unStaked, the auction list is empty + + // 2.1 restake the nodes that were on the queue + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + + // 2.2 Owner3 unStakes one of his nodes from auction + node.ProcessUnStake(t, map[string][][]byte{ + owner3: {owner3StakingQueue[1]}, + }) + unStakedNodesInStakingV4Step1Epoch := make([][]byte, 0) + unStakedNodesInStakingV4Step1Epoch = append(unStakedNodesInStakingV4Step1Epoch, owner3StakingQueue[1]) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner3StakingQueue[1]) + require.Len(t, currNodesConfig.auction, 4) + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.new) + + // 2.3 Owner1 unStakes 2 nodes: one from auction + one active + node.ProcessUnStake(t, map[string][][]byte{ + owner1: {owner1StakingQueue[1], owner1Stats.WaitingBlsKeys[0][0]}, + }) + unStakedNodesInStakingV4Step1Epoch = append(unStakedNodesInStakingV4Step1Epoch, owner1StakingQueue[1]) + unStakedNodesInStakingV4Step1Epoch = append(unStakedNodesInStakingV4Step1Epoch, owner1Stats.WaitingBlsKeys[0][0]) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner1StakingQueue[1]) + require.Len(t, currNodesConfig.auction, 3) + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.new) + + // 3. Check config in epoch = staking v4 step2 + node.Process(t, 3) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 3) + // All unStaked nodes in previous epoch are now leaving + requireMapContains(t, currNodesConfig.leaving, unStakedNodesInStakingV4Step1Epoch) + // 3.1 Owner2 unStakes one of his nodes from auction + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2StakingQueue[1]}, + }) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner2StakingQueue[1]) + shuffledOutNodes := getAllPubKeys(currNodesConfig.shuffledOut) + require.Len(t, currNodesConfig.auction, len(shuffledOutNodes)+len(queue)) + requireSliceContains(t, currNodesConfig.auction, shuffledOutNodes) + requireSliceContains(t, currNodesConfig.auction, queue) + + // 4. 
Check config after whole staking v4 chain is ready, when one of the owners unStakes a node + node.Process(t, 4) + currNodesConfig = node.NodesConfig + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2Stats.EligibleBlsKeys[0][0]}, + }) + node.Process(t, 4) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1) + requireMapContains(t, currNodesConfig.leaving, [][]byte{owner2Stats.EligibleBlsKeys[0][0]}) + require.Empty(t, currNodesConfig.new) + require.Empty(t, currNodesConfig.queue) + + // 4.1 NewOwner stakes 1 node, should be sent to auction + newOwner := "newOwner1" + newNode := map[string]*NodesRegisterData{ + newOwner: { + BLSKeys: [][]byte{generateAddress(444)}, + TotalStake: big.NewInt(2 * nodePrice), + }, + } + node.ProcessStake(t, newNode) + currNodesConfig = node.NodesConfig + requireSliceContains(t, currNodesConfig.auction, newNode[newOwner].BLSKeys) + + // 4.2 NewOwner unStakes his node, he should not be in auction anymore + set to leaving + node.ProcessUnStake(t, map[string][][]byte{ + newOwner: {newNode[newOwner].BLSKeys[0]}, + }) + currNodesConfig = node.NodesConfig + requireSliceContainsNumOfElements(t, currNodesConfig.auction, newNode[newOwner].BLSKeys, 0) + node.Process(t, 3) + currNodesConfig = node.NodesConfig + requireMapContains(t, currNodesConfig.leaving, newNode[newOwner].BLSKeys) +} + +func TestStakingV4_JailAndUnJailNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:2], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[2:4], + }, + StakingQueueKeys: pubKeys[4:6], + TotalStake: big.NewInt(10 * nodePrice), + } + + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[6:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:12], + }, + StakingQueueKeys: pubKeys[12:15], + TotalStake: big.NewInt(10 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 1, + ShardConsensusGroupSize: 1, + MinNumberOfEligibleShardNodes: 2, + MinNumberOfEligibleMetaNodes: 2, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 4, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 2) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner2StakingQueue := owner2Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner2StakingQueue...) 
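+	// owner1 contributes 2 staking queue keys and owner2 contributes 3, so the initial queue is expected to hold 5 entries.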
+ require.Len(t, currNodesConfig.queue, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + // 1.1 Jail 4 nodes: + // - 2 nodes from waiting list shard = 0 + // - 2 nodes from waiting list shard = meta chain + jailedNodes := make([][]byte, 0) + jailedNodes = append(jailedNodes, owner1Stats.WaitingBlsKeys[0]...) + jailedNodes = append(jailedNodes, owner2Stats.WaitingBlsKeys[core.MetachainShardId][:2]...) + node.ProcessJail(t, jailedNodes) + + // 1.2 UnJail 2 nodes from initial jailed nodes: + // - 1 node from waiting list shard = 0 + // - 1 node from waiting list shard = meta chain + unJailedNodes := make([][]byte, 0) + unJailedNodes = append(unJailedNodes, owner1Stats.WaitingBlsKeys[0][0]) + unJailedNodes = append(unJailedNodes, owner2Stats.WaitingBlsKeys[core.MetachainShardId][0]) + jailedNodes = remove(jailedNodes, unJailedNodes[0]) + jailedNodes = remove(jailedNodes, unJailedNodes[1]) + node.ProcessUnJail(t, unJailedNodes) + + // 2. Two jailed nodes are now leaving; the other two unJailed nodes are re-staked and distributed on waiting list + node.Process(t, 3) + currNodesConfig = node.NodesConfig + requireMapContains(t, currNodesConfig.leaving, jailedNodes) + requireMapContains(t, currNodesConfig.waiting, unJailedNodes) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, make([][]byte, 0)) + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Empty(t, currNodesConfig.queue) + + // 2.1 ReStake the nodes that were in the queue + // but first, we need to unJail the nodes + node.ProcessUnJail(t, jailedNodes) + node.ProcessReStake(t, queue) + currNodesConfig = node.NodesConfig + queue = append(queue, jailedNodes...) + require.Empty(t, currNodesConfig.queue) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // 3. Epoch = stakingV4Step2 + node.Process(t, 1) + currNodesConfig = node.NodesConfig + queue = append(queue, getAllPubKeys(currNodesConfig.shuffledOut)...) + require.Empty(t, currNodesConfig.queue) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // 3.1 Jail a random node from waiting list + newJailed := getAllPubKeys(currNodesConfig.waiting)[:1] + node.ProcessJail(t, newJailed) + + // 4. Epoch = stakingV4Step3; + // 4.1 Expect jailed node from waiting list is now leaving + node.Process(t, 4) + currNodesConfig = node.NodesConfig + requireMapContains(t, currNodesConfig.leaving, newJailed) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, newJailed, 0) + require.Empty(t, currNodesConfig.queue) + + // 4.2 UnJail previous node and expect it is sent to auction + node.ProcessUnJail(t, newJailed) + currNodesConfig = node.NodesConfig + requireSliceContains(t, currNodesConfig.auction, newJailed) + require.Empty(t, currNodesConfig.queue) + + // 5. 
Epoch is now after whole staking v4 chain is activated + node.Process(t, 3) + currNodesConfig = node.NodesConfig + queue = currNodesConfig.auction + newJailed = queue[:1] + newUnJailed := newJailed[0] + + // 5.1 Take a random node from auction and jail it; expect it is removed from auction list + node.ProcessJail(t, newJailed) + queue = remove(queue, newJailed[0]) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + + // 5.2 UnJail previous node; expect it is sent back to auction + node.ProcessUnJail(t, [][]byte{newUnJailed}) + queue = append(queue, newUnJailed) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + require.Empty(t, node.NodesConfig.queue) +} + +func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffledToToWaiting(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:4], + 0: pubKeys[4:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:9], + 0: pubKeys[9:10], + }, + TotalStake: big.NewInt(20 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 4, + MinNumberOfEligibleMetaNodes: 4, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, // epoch 3 + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: 6, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + expectedNodesNum := &configNum{ + eligible: map[uint32]int{ + core.MetachainShardId: 4, + 0: 4, + }, + waiting: map[uint32]int{ + core.MetachainShardId: 1, + 0: 1, + }, + } + currNodesConfig := node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + + // During these 9 epochs, we will always have: + // - 10 activeNodes (8 eligible + 2 waiting) + // - 1 node to shuffle out per shard + // Meanwhile, maxNumNodes changes from 12-10-12 + // Since activeNodes <= maxNumNodes, shuffled out nodes will always be sent directly to waiting list, + // instead of auction(there is no reason to send them to auction, they will be selected anyway) + epoch := uint32(0) + numOfShuffledOut := 2 + numRemainingEligible := 6 + prevNodesConfig := currNodesConfig + for epoch < 9 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + + prevNodesConfig = currNodesConfig + epoch++ + } + + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + // Epoch = 9 with: + // - activeNodes = 10 + // - maxNumNodes = 12 + // Owner2 stakes 2 nodes, which should be initially sent to auction list + owner2Nodes := pubKeys[10:12] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner2": { + BLSKeys: owner2Nodes, + TotalStake: big.NewInt(5 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner2Nodes) + + // Epoch = 10 with: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Owner2's new nodes are selected from auction and distributed to waiting list + node.Process(t, 5) + currNodesConfig = node.NodesConfig + expectedNodesNum.waiting[core.MetachainShardId]++ + expectedNodesNum.waiting[0]++ + expectedNodesNum.auction = 0 + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), owner2Nodes) + + // During epochs 10-13, we will have: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Since activeNodes == maxNumNodes, shuffled out nodes will always be sent directly to waiting list, instead of auction + epoch = 10 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + prevNodesConfig = currNodesConfig + for epoch < 13 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + + prevNodesConfig = currNodesConfig + epoch++ + } + + // Epoch = 13 with: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Owner3 stakes 2 nodes, which should be initially sent to auction list + owner3Nodes := pubKeys[12:14] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner3": { + BLSKeys: owner3Nodes, + TotalStake: big.NewInt(5 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner3Nodes) + + // During epochs 14-18, we will have: + // - activeNodes = 14 + // - maxNumNodes = 12 + // Since activeNodes > maxNumNodes, shuffled out nodes (2) will be sent to auction list + node.Process(t, 5) + prevNodesConfig = 
node.NodesConfig + epoch = 14 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + numOfUnselectedNodesFromAuction := 0 + numOfSelectedNodesFromAuction := 2 + for epoch < 18 { + checkConfig(t, expectedNodesNum, currNodesConfig) + + node.Process(t, 5) + currNodesConfig = node.NodesConfig + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } + + // Epoch = 18, with: + // - activeNodes = 14 + // - maxNumNodes = 12 + // Owner3 unStakes one of his nodes + node.ProcessUnStake(t, map[string][][]byte{ + "owner3": {owner3Nodes[0]}, + }) + + // Epoch = 19, with: + // - activeNodes = 13 + // - maxNumNodes = 12 + // Owner3's unStaked node is now leaving + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.leaving, 1) + requireMapContains(t, currNodesConfig.leaving, [][]byte{owner3Nodes[0]}) + + epoch = 19 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + // During epochs 19-23, we will have: + // - activeNodes = 13 + // - maxNumNodes = 12 + // Since activeNodes > maxNumNodes: + // - shuffled out nodes (2) will be sent to auction list + // - waiting lists will be unbalanced (3 in total: 1 + 2 per shard) + // - no node will spend extra epochs in eligible/waiting, since waiting lists will always be refilled + prevNodesConfig = node.NodesConfig + for epoch < 23 { + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 3) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.auction, 2) + + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } +} + +func TestStakingV4_NewlyStakedNodesInStakingV4Step2ShouldBeSentToWaitingIfListIsTooLow(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:4], + 0: pubKeys[4:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:9], + 0: pubKeys[9:10], + }, + TotalStake: big.NewInt(20 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 4, + MinNumberOfEligibleMetaNodes: 4, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 20, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 18, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + expectedNodesNum := &configNum{ + eligible: map[uint32]int{ + core.MetachainShardId: 4, + 0: 4, + }, + waiting: map[uint32]int{ + core.MetachainShardId: 1, + 0: 1, + }, + } + currNodesConfig := node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + + // Epoch = 0, before staking v4, owner2 stakes 2 nodes + // - maxNumNodes = 20 + // - activeNumNodes = 10 + // Newly staked nodes should be sent to new list + owner2Nodes := pubKeys[12:14] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner2": { + BLSKeys: owner2Nodes, + TotalStake: big.NewInt(2 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.new = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.new, owner2Nodes) + + // Epoch = 1, staking v4 step 1 + // - maxNumNodes = 20 + // - activeNumNodes = 12 + // Owner2's new nodes should have been sent to waiting + node.Process(t, 5) + currNodesConfig = node.NodesConfig + expectedNodesNum.new = 0 + expectedNodesNum.waiting[0]++ + expectedNodesNum.waiting[core.MetachainShardId]++ + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), owner2Nodes, 2) + + // Epoch = 1, before staking v4, owner3 stakes 2 nodes + // - maxNumNodes = 20 + // - activeNumNodes = 12 + // Newly staked nodes should be sent to auction list + owner3Nodes := pubKeys[15:17] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner3": { + BLSKeys: owner3Nodes, + TotalStake: big.NewInt(2 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner3Nodes) + + // Epoch = 2, staking v4 step 2 + // - maxNumNodes = 20 + // - activeNumNodes = 14 + // Owner3's auction nodes should have been sent to waiting + node.Process(t, 5) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 0 + expectedNodesNum.waiting[0]++ + expectedNodesNum.waiting[core.MetachainShardId]++ + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), owner3Nodes, 2) + + // During epochs 2-6, we will have: + // - activeNodes = 14 + // - maxNumNodes = 18-20 + // Since activeNodes < maxNumNodes, shuffled out nodes will always be sent directly to waiting list, instead of auction + epoch := uint32(2) + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + numOfShuffledOut := 2 + numRemainingEligible := 6 + numOfUnselectedNodesFromAuction := 0 + numOfSelectedNodesFromAuction := 0 + + prevNodesConfig := currNodesConfig + for epoch < 6 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } +} + +func TestStakingV4_LeavingNodesEdgeCases(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:3], + 0: pubKeys[3:6], + 1: pubKeys[6:9], + 2: pubKeys[9:12], + }, + TotalStake: 
big.NewInt(12 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 3, + ShardConsensusGroupSize: 3, + MinNumberOfEligibleShardNodes: 3, + MinNumberOfEligibleMetaNodes: 3, + NumOfShards: 3, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 16, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 18, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 12, + NodesToShufflePerShard: 2, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(5) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 0) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 0) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 0) + require.Len(t, currNodesConfig.eligible[1], 3) + require.Len(t, currNodesConfig.waiting[1], 0) + require.Len(t, currNodesConfig.eligible[2], 3) + require.Len(t, currNodesConfig.waiting[2], 0) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // NewOwner0 stakes 1 node with top up = 0 before staking v4; should be sent to new nodes, since there are enough slots + newOwner0 := "newOwner0" + newOwner0BlsKeys := [][]byte{generateAddress(101)} + node.ProcessStake(t, map[string]*NodesRegisterData{ + newOwner0: { + BLSKeys: newOwner0BlsKeys, + TotalStake: big.NewInt(nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, currNodesConfig.new, newOwner0BlsKeys) + + // UnStake one of the initial nodes + node.ProcessUnStake(t, map[string][][]byte{ + owner1: {owner1Stats.EligibleBlsKeys[core.MetachainShardId][0]}, + }) + + // Fast-forward few epochs such that the whole staking v4 is activated. + // We should have same 12 initial nodes + 1 extra node (because of legacy code where all leaving nodes were + // considered to be eligible and the unStaked node was forced to remain eligible) + node.Process(t, 49) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 1) + + // Stake 10 extra nodes and check that they are sent to auction + newOwner1 := "newOwner1" + newOwner1BlsKeys := generateAddresses(303, 10) + node.ProcessStake(t, map[string]*NodesRegisterData{ + newOwner1: { + BLSKeys: newOwner1BlsKeys, + TotalStake: big.NewInt(nodePrice * 10), + }, + }) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, currNodesConfig.auction, newOwner1BlsKeys) + + // After 2 epochs, unStake all previously staked keys. Some of them have been already sent to eligible/waiting, but most + // of them are still in auction. UnStaked nodes' status from auction should be: leaving now, but their previous list was auction. + // We should not force his auction nodes as being eligible in the next epoch. We should only force his existing active + // nodes to remain in the system. 
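+	// Roughly two epochs (5 rounds per epoch here) are processed first, so that part of newOwner1's auction nodes can be distributed before the unStake below.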
+ node.Process(t, 10) + currNodesConfig = node.NodesConfig + newOwner1AuctionNodes := getIntersection(currNodesConfig.auction, newOwner1BlsKeys) + newOwner1EligibleNodes := getIntersection(getAllPubKeys(currNodesConfig.eligible), newOwner1BlsKeys) + newOwner1WaitingNodes := getIntersection(getAllPubKeys(currNodesConfig.waiting), newOwner1BlsKeys) + newOwner1ActiveNodes := append(newOwner1EligibleNodes, newOwner1WaitingNodes...) + require.Equal(t, len(newOwner1AuctionNodes)+len(newOwner1ActiveNodes), len(newOwner1BlsKeys)) // sanity check + + node.ClearStoredMbs() + node.ProcessUnStake(t, map[string][][]byte{ + newOwner1: newOwner1BlsKeys, + }) + + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + requireMapContains(t, currNodesConfig.leaving, newOwner1AuctionNodes) + requireMapDoesNotContain(t, currNodesConfig.eligible, newOwner1AuctionNodes) + requireMapDoesNotContain(t, currNodesConfig.waiting, newOwner1AuctionNodes) + + allCurrentActiveNodes := append(getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(currNodesConfig.waiting)...) + owner1NodesThatAreStillForcedToRemain := getIntersection(allCurrentActiveNodes, newOwner1ActiveNodes) + require.NotZero(t, len(owner1NodesThatAreStillForcedToRemain)) + + // Fast-forward some epochs, no error should occur, and we should have our initial config of: + // - 12 eligible nodes + // - 1 waiting list + // - some forced nodes to remain from newOwner1 + node.Process(t, 10) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 1) + allCurrentActiveNodes = append(getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(currNodesConfig.waiting)...) 
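The requireMapContains and requireMapDoesNotContain calls above assert BLS-key membership across a per-shard map. A minimal sketch of the positive check (hypothetical name, assuming map[uint32][][]byte and the package's existing testing/require imports):

// requireMapContainsSketch verifies that every given key appears in some shard's
// slice of nodesPerShard. Illustrative only; the real helper may differ.
func requireMapContainsSketch(t *testing.T, nodesPerShard map[uint32][][]byte, keys [][]byte) {
	present := make(map[string]struct{})
	for _, shardKeys := range nodesPerShard {
		for _, key := range shardKeys {
			present[string(key)] = struct{}{}
		}
	}

	for _, key := range keys {
		_, found := present[string(key)]
		require.True(t, found, "missing BLS key %x", key)
	}
}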
+ owner1NodesThatAreStillForcedToRemain = getIntersection(allCurrentActiveNodes, newOwner1ActiveNodes) + require.NotZero(t, len(owner1NodesThatAreStillForcedToRemain)) + + // Stake 10 extra nodes such that the forced eligible nodes from previous newOwner1 can leave the system + // and are replaced by new nodes + newOwner2 := "newOwner2" + newOwner2BlsKeys := generateAddresses(403, 10) + node.ProcessStake(t, map[string]*NodesRegisterData{ + newOwner2: { + BLSKeys: newOwner2BlsKeys, + TotalStake: big.NewInt(nodePrice * 10), + }, + }) + currNodesConfig = node.NodesConfig + requireSliceContains(t, currNodesConfig.auction, newOwner2BlsKeys) + + // Fast-forward multiple epochs and check that newOwner1's forced nodes from previous epochs left + node.Process(t, 20) + currNodesConfig = node.NodesConfig + allCurrentNodesInSystem := getAllPubKeysFromConfig(currNodesConfig) + owner1LeftNodes := getIntersection(owner1NodesThatAreStillForcedToRemain, allCurrentNodesInSystem) + require.Zero(t, len(owner1LeftNodes)) +} + +func TestStakingV4LeavingNodesShouldDistributeToWaitingOnlyNecessaryNodes(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfMetaNodes := uint32(400) + numOfShards := uint32(3) + numOfEligibleNodesPerShard := uint32(400) + numOfWaitingNodesPerShard := uint32(400) + numOfNodesToShufflePerShard := uint32(80) + shardConsensusGroupSize := 266 + metaConsensusGroupSize := 266 + numOfNodesInStakingQueue := uint32(80) + + totalEligible := int(numOfEligibleNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + totalWaiting := int(numOfWaitingNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + + node := NewTestMetaProcessor( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + numOfNodesInStakingQueue, + ) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + initialNodes := node.NodesConfig + require.Len(t, getAllPubKeys(initialNodes.eligible), totalEligible) + require.Len(t, getAllPubKeys(initialNodes.waiting), totalWaiting) + require.Len(t, initialNodes.queue, int(numOfNodesInStakingQueue)) + require.Empty(t, initialNodes.shuffledOut) + require.Empty(t, initialNodes.auction) + + // 2. Check config after staking v4 initialization + node.Process(t, 5) + nodesConfigStakingV4Step1 := node.NodesConfig + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.eligible), totalEligible) + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.waiting), totalWaiting) + require.Empty(t, nodesConfigStakingV4Step1.queue) + require.Empty(t, nodesConfigStakingV4Step1.shuffledOut) + require.Empty(t, nodesConfigStakingV4Step1.auction) // the queue should be empty + + // 3. 
re-stake the node nodes that were in the queue + node.ProcessReStake(t, initialNodes.queue) + nodesConfigStakingV4Step1 = node.NodesConfig + requireSameSliceDifferentOrder(t, initialNodes.queue, nodesConfigStakingV4Step1.auction) + + // Reach step 3 and check normal flow + node.Process(t, 10) + epochs := 0 + prevConfig := node.NodesConfig + numOfSelectedNodesFromAuction := 320 // 320, since we will always fill shuffled out nodes with this config + numOfUnselectedNodesFromAuction := 80 // 80 = 400 from queue - 320 + numOfShuffledOut := 80 * 4 // 80 per shard + meta + for epochs < 3 { + node.Process(t, 5) + newNodeConfig := node.NodesConfig + + require.Len(t, getAllPubKeys(newNodeConfig.eligible), totalEligible) // 1600 + require.Len(t, getAllPubKeys(newNodeConfig.waiting), 1280) // 1280 + require.Len(t, getAllPubKeys(newNodeConfig.shuffledOut), 320) // 320 + require.Len(t, newNodeConfig.auction, 400) // 400 + require.Empty(t, newNodeConfig.queue) + require.Empty(t, newNodeConfig.leaving) + + checkStakingV4EpochChangeFlow(t, newNodeConfig, prevConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + prevConfig = newNodeConfig + epochs++ + } + + // UnStake: + // - 46 from waiting + eligible ( 13 waiting + 36 eligible) + // - 11 from auction + currNodesCfg := node.NodesConfig + nodesToUnStakeFromAuction := currNodesCfg.auction[:11] + + nodesToUnStakeFromWaiting := append(currNodesCfg.waiting[0][:3], currNodesCfg.waiting[1][:3]...) + nodesToUnStakeFromWaiting = append(nodesToUnStakeFromWaiting, currNodesCfg.waiting[2][:3]...) + nodesToUnStakeFromWaiting = append(nodesToUnStakeFromWaiting, currNodesCfg.waiting[core.MetachainShardId][:4]...) + + nodesToUnStakeFromEligible := append(currNodesCfg.eligible[0][:8], currNodesCfg.eligible[1][:8]...) + nodesToUnStakeFromEligible = append(nodesToUnStakeFromEligible, currNodesCfg.eligible[2][:8]...) + nodesToUnStakeFromEligible = append(nodesToUnStakeFromEligible, currNodesCfg.eligible[core.MetachainShardId][:9]...) + + nodesToUnStake := getAllOwnerNodesMap(nodesToUnStakeFromAuction, nodesToUnStakeFromWaiting, nodesToUnStakeFromEligible) + + prevConfig = currNodesCfg + node.ProcessUnStake(t, nodesToUnStake) + node.Process(t, 5) + currNodesCfg = node.NodesConfig + + require.Len(t, getAllPubKeys(currNodesCfg.leaving), 57) // 11 auction + 46 active (13 waiting + 36 eligible) + require.Len(t, getAllPubKeys(currNodesCfg.shuffledOut), 274) // 320 - 46 active + require.Len(t, currNodesCfg.auction, 343) // 400 initial - 57 leaving + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesCfg.waiting), prevConfig.auction, 320) // 320 selected + requireSliceContainsNumOfElements(t, currNodesCfg.auction, prevConfig.auction, 69) // 69 unselected + require.Len(t, getAllPubKeys(currNodesCfg.eligible), 1600) + require.Len(t, getAllPubKeys(currNodesCfg.waiting), 1280) + + prevConfig = currNodesCfg + // UnStake: + // - 224 from waiting + eligible ( 13 waiting + 36 eligible), but unbalanced: + // -> unStake 100 from waiting shard=meta + // -> unStake 90 from eligible shard=2 + // - 11 from auction + nodesToUnStakeFromAuction = currNodesCfg.auction[:11] + nodesToUnStakeFromWaiting = append(currNodesCfg.waiting[0][:3], currNodesCfg.waiting[1][:3]...) + nodesToUnStakeFromWaiting = append(nodesToUnStakeFromWaiting, currNodesCfg.waiting[2][:3]...) + nodesToUnStakeFromWaiting = append(nodesToUnStakeFromWaiting, currNodesCfg.waiting[core.MetachainShardId][:100]...) 
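// Note on the unStake sets built here (waiting) and just below (eligible): with
// NodesToShufflePerShard = 80 in this config, at most 80 validators per shard can
// actually leave in one epoch. The metachain gets 100 waiting + 9 eligible = 109
// unStake requests and shard 2 gets 3 waiting + 90 eligible = 93, so both are
// capped at 80 leaving nodes; the remaining 29 and 13 are forced to stay, as the
// assertions below spell out.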
+ + nodesToUnStakeFromEligible = append(currNodesCfg.eligible[0][:8], currNodesCfg.eligible[1][:8]...) + nodesToUnStakeFromEligible = append(nodesToUnStakeFromEligible, currNodesCfg.eligible[2][:90]...) + nodesToUnStakeFromEligible = append(nodesToUnStakeFromEligible, currNodesCfg.eligible[core.MetachainShardId][:9]...) + + nodesToUnStake = getAllOwnerNodesMap(nodesToUnStakeFromAuction, nodesToUnStakeFromWaiting, nodesToUnStakeFromEligible) + node.ProcessUnStake(t, nodesToUnStake) + node.Process(t, 4) + currNodesCfg = node.NodesConfig + + // Leaving: + // - 11 auction + // - shard 0 = 11 + // - shard 1 = 11 + // - shard 2 = 80 (there were 93 unStakes, but only 80 will be leaving, rest 13 will be forced to stay) + // - shard meta = 80 (there were 109 unStakes, but only 80 will be leaving, rest 29 will be forced to stay) + // Therefore we will have in total actually leaving = 193 (11 + 11 + 11 + 80 + 80) + // We should see a log in selector like this: + // auctionListSelector.SelectNodesFromAuctionList max nodes = 2880 current number of validators = 2656 num of nodes which will be shuffled out = 138 num forced to stay = 42 num of validators after shuffling = 2518 auction list size = 332 available slots (2880 - 2560) = 320 + require.Len(t, getAllPubKeys(currNodesCfg.leaving), 193) + require.Len(t, getAllPubKeys(currNodesCfg.shuffledOut), 138) // 69 from shard0 + shard from shard1, rest will not be shuffled + require.Len(t, currNodesCfg.auction, 150) // 138 shuffled out + 12 unselected + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesCfg.waiting), prevConfig.auction, 320) // 320 selected + requireSliceContainsNumOfElements(t, currNodesCfg.auction, prevConfig.auction, 12) // 12 unselected + require.Len(t, getAllPubKeys(currNodesCfg.eligible), 1600) + require.Len(t, getAllPubKeys(currNodesCfg.waiting), 1280) +} + +func TestStakingV4MoreLeavingNodesThanToShufflePerShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfMetaNodes := uint32(400) + numOfShards := uint32(3) + numOfEligibleNodesPerShard := uint32(400) + numOfWaitingNodesPerShard := uint32(400) + numOfNodesToShufflePerShard := uint32(80) + shardConsensusGroupSize := 266 + metaConsensusGroupSize := 266 + numOfNodesInStakingQueue := uint32(80) + + totalEligible := int(numOfEligibleNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + totalWaiting := int(numOfWaitingNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + + node := NewTestMetaProcessor( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + numOfNodesInStakingQueue, + ) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + initialNodes := node.NodesConfig + require.Len(t, getAllPubKeys(initialNodes.eligible), totalEligible) + require.Len(t, getAllPubKeys(initialNodes.waiting), totalWaiting) + require.Len(t, initialNodes.queue, int(numOfNodesInStakingQueue)) + require.Empty(t, initialNodes.shuffledOut) + require.Empty(t, initialNodes.auction) + + // 2. 
Check config after staking v4 initialization + node.Process(t, 5) + nodesConfigStakingV4Step1 := node.NodesConfig + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.eligible), totalEligible) + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.waiting), totalWaiting) + require.Empty(t, nodesConfigStakingV4Step1.queue) + require.Empty(t, nodesConfigStakingV4Step1.shuffledOut) + require.Empty(t, nodesConfigStakingV4Step1.auction) // the queue should be empty + + // 3. re-stake the node nodes that were in the queue + node.ProcessReStake(t, initialNodes.queue) + nodesConfigStakingV4Step1 = node.NodesConfig + requireSameSliceDifferentOrder(t, initialNodes.queue, nodesConfigStakingV4Step1.auction) + + // Reach step 3 + node.Process(t, 10) + + // UnStake 100 nodes from each shard: + // - shard 0: 100 waiting + // - shard 1: 50 waiting + 50 eligible + // - shard 2: 20 waiting + 80 eligible + // - shard meta: 100 eligible + currNodesCfg := node.NodesConfig + + nodesToUnStakeFromWaiting := currNodesCfg.waiting[0][:100] + nodesToUnStakeFromWaiting = append(nodesToUnStakeFromWaiting, currNodesCfg.waiting[1][:50]...) + nodesToUnStakeFromWaiting = append(nodesToUnStakeFromWaiting, currNodesCfg.waiting[2][:20]...) + + nodesToUnStakeFromEligible := currNodesCfg.eligible[1][:50] + nodesToUnStakeFromEligible = append(nodesToUnStakeFromEligible, currNodesCfg.eligible[2][:80]...) + nodesToUnStakeFromEligible = append(nodesToUnStakeFromEligible, currNodesCfg.eligible[core.MetachainShardId][:100]...) + + nodesToUnStake := getAllOwnerNodesMap(nodesToUnStakeFromWaiting, nodesToUnStakeFromEligible) + + prevConfig := currNodesCfg + node.ProcessUnStake(t, nodesToUnStake) + node.Process(t, 4) + currNodesCfg = node.NodesConfig + + require.Len(t, getAllPubKeys(currNodesCfg.leaving), 320) // we unStaked 400, but only allowed 320 to leave + require.Len(t, getAllPubKeys(currNodesCfg.shuffledOut), 0) // no shuffled out, since 80 per shard were leaving + require.Len(t, currNodesCfg.auction, 80) // 400 initial - 320 selected + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesCfg.waiting), prevConfig.auction, 320) // 320 selected + requireSliceContainsNumOfElements(t, currNodesCfg.auction, prevConfig.auction, 80) // 80 unselected + require.Len(t, getAllPubKeys(currNodesCfg.eligible), 1600) + require.Len(t, getAllPubKeys(currNodesCfg.waiting), 1280) + + // Add 400 new nodes in the system and fast-forward + node.ProcessStake(t, map[string]*NodesRegisterData{ + "ownerX": { + BLSKeys: generateAddresses(99999, 400), + TotalStake: big.NewInt(nodePrice * 400), + }, + }) + node.Process(t, 10) + + // UnStake exactly 80 nodes + prevConfig = node.NodesConfig + nodesToUnStake = getAllOwnerNodesMap(node.NodesConfig.eligible[1][:80]) + node.ProcessUnStake(t, nodesToUnStake) + node.Process(t, 4) + + currNodesCfg = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesCfg.leaving), 80) // 320 - 80 leaving + require.Len(t, getAllPubKeys(currNodesCfg.shuffledOut), 240) // 240 shuffled out + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesCfg.waiting), prevConfig.auction, 320) // 320 selected + requireSliceContainsNumOfElements(t, currNodesCfg.auction, prevConfig.auction, 80) // 80 unselected + require.Len(t, getAllPubKeys(currNodesCfg.eligible), 1600) + require.Len(t, getAllPubKeys(currNodesCfg.waiting), 1280) +} diff --git a/integrationTests/vm/staking/systemSCCreator.go b/integrationTests/vm/staking/systemSCCreator.go new file mode 100644 index 00000000000..cf18140797a --- /dev/null +++ 
b/integrationTests/vm/staking/systemSCCreator.go @@ -0,0 +1,264 @@ +package staking + +import ( + "bytes" + "strconv" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + epochStartMock "github.com/multiversx/mx-chain-go/epochStart/mock" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/genesis/process/disabled" + "github.com/multiversx/mx-chain-go/process" + metaProcess "github.com/multiversx/mx-chain-go/process/factory/metachain" + "github.com/multiversx/mx-chain-go/process/peer" + "github.com/multiversx/mx-chain-go/process/smartContract/builtInFunctions" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks/counters" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + vmcommonMock "github.com/multiversx/mx-chain-vm-common-go/mock" +) + +func createSystemSCProcessor( + nc nodesCoordinator.NodesCoordinator, + coreComponents factory.CoreComponentsHolder, + stateComponents factory.StateComponentsHandler, + shardCoordinator sharding.Coordinator, + maxNodesConfig []config.MaxNodesChangeConfig, + validatorStatisticsProcessor process.ValidatorStatisticsProcessor, + systemVM vmcommon.VMExecutionHandler, + stakingDataProvider epochStart.StakingDataProvider, +) process.EpochStartSystemSCProcessor { + maxNodesChangeConfigProvider, _ := notifier.NewNodesConfigProvider( + coreComponents.EpochNotifier(), + maxNodesConfig, + ) + + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := metachain.NewAuctionListDisplayer(metachain.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachain.NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + }) + + argsAuctionListSelector := metachain.AuctionListSelectorArgs{ + ShardCoordinator: shardCoordinator, + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + auctionListSelector, _ := metachain.NewAuctionListSelector(argsAuctionListSelector) + + args := metachain.ArgsNewEpochStartSystemSCProcessing{ + SystemVM: systemVM, + UserAccountsDB: stateComponents.AccountsAdapter(), + PeerAccountsDB: 
stateComponents.PeerAccounts(), + Marshalizer: coreComponents.InternalMarshalizer(), + StartRating: initialRating, + ValidatorInfoCreator: validatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: &epochStartMock.ChanceComputerStub{}, + EpochNotifier: coreComponents.EpochNotifier(), + GenesisNodesConfig: &genesisMocks.NodesSetupStub{}, + StakingDataProvider: stakingDataProvider, + NodesConfigProvider: nc, + ShardCoordinator: shardCoordinator, + ESDTOwnerAddressBytes: bytes.Repeat([]byte{1}, 32), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListSelector: auctionListSelector, + } + + systemSCProcessor, _ := metachain.NewSystemSCProcessor(args) + return systemSCProcessor +} + +func createStakingDataProvider( + enableEpochsHandler common.EnableEpochsHandler, + systemVM vmcommon.VMExecutionHandler, +) epochStart.StakingDataProvider { + argsStakingDataProvider := metachain.StakingDataProviderArgs{ + EnableEpochsHandler: enableEpochsHandler, + SystemVM: systemVM, + MinNodePrice: strconv.Itoa(nodePrice), + } + stakingSCProvider, _ := metachain.NewStakingDataProvider(argsStakingDataProvider) + + return stakingSCProvider +} + +func createValidatorStatisticsProcessor( + dataComponents factory.DataComponentsHolder, + coreComponents factory.CoreComponentsHolder, + nc nodesCoordinator.NodesCoordinator, + shardCoordinator sharding.Coordinator, + peerAccounts state.AccountsAdapter, +) process.ValidatorStatisticsProcessor { + argsValidatorsProcessor := peer.ArgValidatorStatisticsProcessor{ + Marshalizer: coreComponents.InternalMarshalizer(), + NodesCoordinator: nc, + ShardCoordinator: shardCoordinator, + DataPool: dataComponents.Datapool(), + StorageService: dataComponents.StorageService(), + PubkeyConv: coreComponents.AddressPubKeyConverter(), + PeerAdapter: peerAccounts, + Rater: coreComponents.Rater(), + RewardsHandler: &epochStartMock.RewardsHandlerStub{}, + NodesSetup: &genesisMocks.NodesSetupStub{}, + MaxComputableRounds: 1, + MaxConsecutiveRoundsOfRatingDecrease: 2000, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + validatorStatisticsProcessor, _ := peer.NewValidatorStatisticsProcessor(argsValidatorsProcessor) + return validatorStatisticsProcessor +} + +func createBlockChainHook( + dataComponents factory.DataComponentsHolder, + coreComponents factory.CoreComponentsHolder, + accountsAdapter state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + gasScheduleNotifier core.GasScheduleNotifier, +) (hooks.ArgBlockChainHook, process.BlockChainHookWithAccountsAdapter) { + argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ + GasSchedule: gasScheduleNotifier, + MapDNSAddresses: make(map[string]struct{}), + Marshalizer: coreComponents.InternalMarshalizer(), + Accounts: accountsAdapter, + ShardCoordinator: shardCoordinator, + EpochNotifier: coreComponents.EpochNotifier(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + AutomaticCrawlerAddresses: [][]byte{core.SystemAccountAddress}, + MaxNumNodesInTransferRole: 1, + GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, + MapDNSV2Addresses: make(map[string]struct{}), + } + + builtInFunctionsContainer, _ := builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn) + _ = builtInFunctionsContainer.CreateBuiltInFunctionContainer() + builtInFunctionsContainer.BuiltInFunctionContainer() + + argsHook := hooks.ArgBlockChainHook{ + 
Accounts: accountsAdapter, + PubkeyConv: coreComponents.AddressPubKeyConverter(), + StorageService: dataComponents.StorageService(), + BlockChain: dataComponents.Blockchain(), + ShardCoordinator: shardCoordinator, + Marshalizer: coreComponents.InternalMarshalizer(), + Uint64Converter: coreComponents.Uint64ByteSliceConverter(), + NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, + BuiltInFunctions: builtInFunctionsContainer.BuiltInFunctionContainer(), + DataPool: dataComponents.Datapool(), + CompiledSCPool: dataComponents.Datapool().SmartContracts(), + EpochNotifier: coreComponents.EpochNotifier(), + GlobalSettingsHandler: &vmcommonMock.GlobalSettingsHandlerStub{}, + NilCompiledSCStore: true, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + GasSchedule: gasScheduleNotifier, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, + } + + blockChainHook, _ := hooks.NewBlockChainHookImpl(argsHook) + return argsHook, blockChainHook +} + +func createVMContainerFactory( + coreComponents factory.CoreComponentsHolder, + gasScheduleNotifier core.GasScheduleNotifier, + blockChainHook process.BlockChainHookWithAccountsAdapter, + argsBlockChainHook hooks.ArgBlockChainHook, + stateComponents factory.StateComponentsHandler, + shardCoordinator sharding.Coordinator, + nc nodesCoordinator.NodesCoordinator, + maxNumNodes uint32, +) process.VirtualMachinesContainerFactory { + signVerifer, _ := disabled.NewMessageSignVerifier(&cryptoMocks.KeyGenStub{}) + + argsNewVMContainerFactory := metaProcess.ArgsNewVMContainerFactory{ + BlockChainHook: blockChainHook, + PubkeyConv: coreComponents.AddressPubKeyConverter(), + Economics: coreComponents.EconomicsData(), + MessageSignVerifier: signVerifer, + GasSchedule: gasScheduleNotifier, + NodesConfigProvider: &genesisMocks.NodesSetupStub{}, + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + SystemSCConfig: &config.SystemSmartContractsConfig{ + ESDTSystemSCConfig: config.ESDTSystemSCConfig{ + BaseIssuingCost: "1000", + OwnerAddress: "aaaaaa", + }, + GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + NumNodes: 2000, + ProposalCost: "500", + MinQuorum: 50, + MinPassThreshold: 10, + MinVetoThreshold: 10, + }, + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", + }, + StakingSystemSCConfig: config.StakingSystemSCConfig{ + GenesisNodePrice: strconv.Itoa(nodePrice), + UnJailValue: "10", + MinStepValue: "10", + MinStakeValue: "1", + UnBondPeriod: 1, + NumRoundsWithoutBleed: 1, + MaximumPercentageToBleed: 1, + BleedPercentagePerRound: 1, + MaxNumberOfNodesForStake: uint64(maxNumNodes), + ActivateBLSPubKeyMessageVerification: false, + MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, + }, + DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ + MinCreationDeposit: "100", + MinStakeAmount: "100", + ConfigChangeAddress: "3132333435363738393031323334353637383930313233343536373839303234", + }, + DelegationSystemSCConfig: config.DelegationSystemSCConfig{ + MinServiceFee: 0, + MaxServiceFee: 100, + }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, + }, + ValidatorAccountsDB: stateComponents.PeerAccounts(), + ChanceComputer: coreComponents.Rater(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + ShardCoordinator: 
shardCoordinator, + NodesCoordinator: nc, + UserAccountsDB: stateComponents.AccountsAdapter(), + ArgBlockChainHook: argsBlockChainHook, + } + + metaVmFactory, _ := metaProcess.NewVMContainerFactory(argsNewVMContainerFactory) + return metaVmFactory +} diff --git a/integrationTests/vm/staking/testMetaProcessor.go b/integrationTests/vm/staking/testMetaProcessor.go new file mode 100644 index 00000000000..168287b66bc --- /dev/null +++ b/integrationTests/vm/staking/testMetaProcessor.go @@ -0,0 +1,99 @@ +package staking + +import ( + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" +) + +// NewTestMetaProcessor - +func NewTestMetaProcessor( + numOfMetaNodes uint32, + numOfShards uint32, + numOfEligibleNodesPerShard uint32, + numOfWaitingNodesPerShard uint32, + numOfNodesToShufflePerShard uint32, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + numOfNodesInStakingQueue uint32, +) *TestMetaProcessor { + coreComponents, dataComponents, bootstrapComponents, statusComponents, stateComponents := createComponentHolders(numOfShards) + + maxNodesConfig := createMaxNodesConfig( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + ) + + queue := createStakingQueue( + numOfNodesInStakingQueue, + maxNodesConfig[0].MaxNumNodes, + coreComponents.InternalMarshalizer(), + stateComponents.AccountsAdapter(), + ) + + eligibleMap, waitingMap := createGenesisNodes( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + coreComponents.InternalMarshalizer(), + stateComponents, + ) + + bootStrapStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + nc := createNodesCoordinator( + eligibleMap, + waitingMap, + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + coreComponents, + bootStrapStorer, + bootstrapComponents.NodesCoordinatorRegistryFactory(), + maxNodesConfig, + ) + + return newTestMetaProcessor( + coreComponents, + dataComponents, + bootstrapComponents, + statusComponents, + stateComponents, + nc, + maxNodesConfig, + queue, + ) +} + +func createMaxNodesConfig( + numOfMetaNodes uint32, + numOfShards uint32, + numOfEligibleNodesPerShard uint32, + numOfWaitingNodesPerShard uint32, + numOfNodesToShufflePerShard uint32, +) []config.MaxNodesChangeConfig { + totalEligible := numOfMetaNodes + numOfShards*numOfEligibleNodesPerShard + totalWaiting := (numOfShards + 1) * numOfWaitingNodesPerShard + totalNodes := totalEligible + totalWaiting + + maxNodesConfig := make([]config.MaxNodesChangeConfig, 0) + maxNodesConfig = append(maxNodesConfig, config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: totalNodes, + NodesToShufflePerShard: numOfNodesToShufflePerShard, + }, + ) + + maxNodesConfig = append(maxNodesConfig, config.MaxNodesChangeConfig{ + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: totalNodes - numOfNodesToShufflePerShard*(numOfShards+1), + NodesToShufflePerShard: numOfNodesToShufflePerShard, + }, + ) + + return maxNodesConfig +} diff --git a/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go b/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go new file mode 100644 index 00000000000..f9e9da84a8d --- /dev/null +++ b/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go @@ -0,0 +1,358 @@ +package staking + +import ( + "bytes" + "encoding/hex" + "math/big" 
+ "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +// OwnerStats - +type OwnerStats struct { + EligibleBlsKeys map[uint32][][]byte + WaitingBlsKeys map[uint32][][]byte + StakingQueueKeys [][]byte + TotalStake *big.Int +} + +// InitialNodesConfig - +type InitialNodesConfig struct { + Owners map[string]*OwnerStats + MaxNodesChangeConfig []config.MaxNodesChangeConfig + NumOfShards uint32 + MinNumberOfEligibleShardNodes uint32 + MinNumberOfEligibleMetaNodes uint32 + ShardConsensusGroupSize int + MetaConsensusGroupSize int +} + +// NewTestMetaProcessorWithCustomNodes - +func NewTestMetaProcessorWithCustomNodes(config *InitialNodesConfig) *TestMetaProcessor { + coreComponents, dataComponents, bootstrapComponents, statusComponents, stateComponents := createComponentHolders(config.NumOfShards) + + queue := createStakingQueueCustomNodes( + config.Owners, + coreComponents.InternalMarshalizer(), + stateComponents.AccountsAdapter(), + ) + + eligibleMap, waitingMap := createGenesisNodesWithCustomConfig( + config.Owners, + coreComponents.InternalMarshalizer(), + stateComponents, + ) + + bootstrapStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + nc := createNodesCoordinator( + eligibleMap, + waitingMap, + config.MinNumberOfEligibleMetaNodes, + config.NumOfShards, + config.MinNumberOfEligibleShardNodes, + config.ShardConsensusGroupSize, + config.MetaConsensusGroupSize, + coreComponents, + bootstrapStorer, + bootstrapComponents.NodesCoordinatorRegistryFactory(), + config.MaxNodesChangeConfig, + ) + + return newTestMetaProcessor( + coreComponents, + dataComponents, + bootstrapComponents, + statusComponents, + stateComponents, + nc, + config.MaxNodesChangeConfig, + queue, + ) +} + +// NodesRegisterData - +type NodesRegisterData struct { + BLSKeys [][]byte + TotalStake *big.Int +} + +// ProcessStake will create a block containing mini blocks with staking txs using provided nodes. +// Block will be committed + call to validator system sc will be made to stake all nodes +func (tmp *TestMetaProcessor) ProcessStake(t *testing.T, nodes map[string]*NodesRegisterData) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for owner, registerData := range nodes { + scrs := tmp.doStake(t, []byte(owner), registerData) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) 
+ } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doStake( + t *testing.T, + owner []byte, + registerData *NodesRegisterData, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: owner, + Arguments: createStakeArgs(registerData.BLSKeys), + CallValue: registerData.TotalStake, + GasProvided: 400, + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "stake", + } + + return tmp.runSC(t, arguments) +} + +// ProcessReStake will create a block containing mini blocks with re-staking txs using provided nodes. +// Block will be committed + call to validator system sc will be made to stake all nodes +func (tmp *TestMetaProcessor) ProcessReStake(t *testing.T, blsKeys [][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for _, blsKey := range blsKeys { + scrs := tmp.doReStake(t, blsKey) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) + } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doReStake( + t *testing.T, + blsKey []byte, +) map[string]*smartContractResult.SmartContractResult { + owner := tmp.getOwnerOfBLSKey(t, blsKey) + + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: owner, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "reStakeUnStakedNodes", + } + + return tmp.runSC(t, arguments) +} + +func (tmp *TestMetaProcessor) getOwnerOfBLSKey(t *testing.T, blsKey []byte) []byte { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.ValidatorSCAddress, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "getOwner", + } + + vmOutput, err := tmp.SystemVM.RunSmartContractCall(arguments) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + + return vmOutput.ReturnData[0] +} + +func createStakeArgs(blsKeys [][]byte) [][]byte { + numBLSKeys := int64(len(blsKeys)) + numBLSKeysBytes := big.NewInt(numBLSKeys).Bytes() + argsStake := [][]byte{numBLSKeysBytes} + + for _, blsKey := range blsKeys { + signature := append([]byte("signature-"), blsKey...) + argsStake = append(argsStake, blsKey, signature) + } + + return argsStake +} + +// ProcessUnStake will create a block containing mini blocks with unStaking txs using provided nodes. +// Block will be committed + call to validator system sc will be made to unStake all nodes +func (tmp *TestMetaProcessor) ProcessUnStake(t *testing.T, nodes map[string][][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for owner, blsKeys := range nodes { + scrs := tmp.doUnStake(t, []byte(owner), blsKeys) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) 
+ } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doUnStake( + t *testing.T, + owner []byte, + blsKeys [][]byte, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: owner, + Arguments: blsKeys, + CallValue: big.NewInt(0), + GasProvided: 100, + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "unStake", + } + + return tmp.runSC(t, arguments) +} + +// ProcessJail will create a block containing mini blocks with jail txs using provided nodes. +// Block will be committed + call to validator system sc will be made to jail all nodes +func (tmp *TestMetaProcessor) ProcessJail(t *testing.T, blsKeys [][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + scrs := tmp.doJail(t, blsKeys) + txHashes := tmp.addTxsToCacher(scrs) + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doJail( + t *testing.T, + blsKeys [][]byte, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.JailingAddress, + Arguments: blsKeys, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "jail", + } + + return tmp.runSC(t, arguments) +} + +// ProcessUnJail will create a block containing mini blocks with unJail txs using provided nodes. +// Block will be committed + call to validator system sc will be made to unJail all nodes +func (tmp *TestMetaProcessor) ProcessUnJail(t *testing.T, blsKeys [][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for _, blsKey := range blsKeys { + scrs := tmp.doUnJail(t, blsKey) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) 
+ } + + tmp.commitBlockTxs(t, txHashes, header) +} + +// ClearStoredMbs clears the stored miniblocks +func (tmp *TestMetaProcessor) ClearStoredMbs() { + txCoordMock, _ := tmp.TxCoordinator.(*testscommon.TransactionCoordinatorMock) + txCoordMock.ClearStoredMbs() +} + +func (tmp *TestMetaProcessor) doUnJail( + t *testing.T, + blsKey []byte, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.ValidatorSCAddress, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "unJail", + } + + return tmp.runSC(t, arguments) +} + +func (tmp *TestMetaProcessor) addTxsToCacher(scrs map[string]*smartContractResult.SmartContractResult) [][]byte { + txHashes := make([][]byte, 0) + for scrHash, scr := range scrs { + txHashes = append(txHashes, []byte(scrHash)) + tmp.TxCacher.AddTx([]byte(scrHash), scr) + } + + return txHashes +} + +func (tmp *TestMetaProcessor) commitBlockTxs(t *testing.T, txHashes [][]byte, header data.HeaderHandler) { + _, err := tmp.AccountsAdapter.Commit() + require.Nil(t, err) + + miniBlocks := block.MiniBlockSlice{ + { + TxHashes: txHashes, + SenderShardID: core.MetachainShardId, + ReceiverShardID: core.MetachainShardId, + Type: block.SmartContractResultBlock, + }, + } + tmp.TxCoordinator.AddTxsFromMiniBlocks(miniBlocks) + tmp.createAndCommitBlock(t, header, noTime) + tmp.currentRound += 1 +} + +func (tmp *TestMetaProcessor) runSC(t *testing.T, arguments *vmcommon.ContractCallInput) map[string]*smartContractResult.SmartContractResult { + vmOutput, err := tmp.SystemVM.RunSmartContractCall(arguments) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + + err = integrationTests.ProcessSCOutputAccounts(vmOutput, tmp.AccountsAdapter) + require.Nil(t, err) + + return createSCRsFromStakingSCOutput(vmOutput, tmp.Marshaller) +} + +func createSCRsFromStakingSCOutput( + vmOutput *vmcommon.VMOutput, + marshaller marshal.Marshalizer, +) map[string]*smartContractResult.SmartContractResult { + allSCR := make(map[string]*smartContractResult.SmartContractResult) + parser := smartContract.NewArgumentParser() + outputAccounts := process.SortVMOutputInsideData(vmOutput) + for _, outAcc := range outputAccounts { + storageUpdates := process.GetSortedStorageUpdates(outAcc) + + if bytes.Equal(outAcc.Address, vm.StakingSCAddress) { + scrData := parser.CreateDataFromStorageUpdate(storageUpdates) + scr := &smartContractResult.SmartContractResult{ + RcvAddr: vm.StakingSCAddress, + Data: []byte(scrData), + } + scrBytes, _ := marshaller.Marshal(scr) + scrHash := hex.EncodeToString(scrBytes) + + allSCR[scrHash] = scr + } + } + + return allSCR +} diff --git a/integrationTests/vm/systemVM/stakingSC_test.go b/integrationTests/vm/systemVM/stakingSC_test.go index 69ad5d15a6e..75e958f926b 100644 --- a/integrationTests/vm/systemVM/stakingSC_test.go +++ b/integrationTests/vm/systemVM/stakingSC_test.go @@ -35,6 +35,9 @@ func TestStakingUnstakingAndUnbondingOnMultiShardEnvironment(t *testing.T) { StakingV2EnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( 
diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 0a36b697516..deceb8fd58f 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -60,7 +60,9 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" @@ -139,8 +141,9 @@ type VMTestContext struct { ContractOwner VMTestAccount Contract VMTestAccount - TxCostHandler external.TransactionEvaluator - TxsLogsProcessor process.TransactionLogProcessor + TxCostHandler external.TransactionEvaluator + TxsLogsProcessor process.TransactionLogProcessor + FailedTxLogsAccumulator process.FailedTxLogsAccumulator } // Close - @@ -316,17 +319,12 @@ func CreateAccount(accnts state.AccountsAdapter, pubKey []byte, nonce uint64, ba return hashCreated, nil } -func createEconomicsData(enableEpochsConfig config.EnableEpochs) (process.EconomicsDataHandler, error) { +func createEconomicsData(enableEpochsConfig config.EnableEpochs, gasPriceModifier float64) (process.EconomicsDataHandler, error) { maxGasLimitPerBlock := strconv.FormatUint(math.MaxUint64, 10) minGasPrice := strconv.FormatUint(1, 10) minGasLimit := strconv.FormatUint(1, 10) testProtocolSustainabilityAddress := "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" - builtInCost, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - ArgsParser: smartContract.NewArgumentParser(), - GasSchedule: mock.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - }) - realEpochNotifier := forking.NewGenericEpochNotifier() enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, realEpochNotifier) @@ -367,14 +365,13 @@ func createEconomicsData(enableEpochsConfig config.EnableEpochs) (process.Econom }, MinGasPrice: minGasPrice, GasPerDataByte: "1", - GasPriceModifier: 1.0, + GasPriceModifier: gasPriceModifier, MaxGasPriceSetGuardian: "2000000000", }, }, - EpochNotifier: realEpochNotifier, - EnableEpochsHandler: enableEpochsHandler, - BuiltInFunctionsCostHandler: builtInCost, - TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), + EpochNotifier: realEpochNotifier, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), } return economics.NewEconomicsData(argsNewEconomicsData) @@ -442,10 +439,11 @@ func CreateTxProcessorWithOneSCExecutorMockVM( } txTypeHandler, _ := coordinator.NewTxTypeHandler(argsTxTypeHandler) - economicsData, err := createEconomicsData(enableEpochsConfig) + economicsData, err := createEconomicsData(enableEpochsConfig, 1) if err != nil { return nil, err } + _ = economicsData.SetTxTypeHandler(txTypeHandler) argsNewSCProcessor := scrCommon.ArgsNewSmartContractProcessor{ VmContainer: vmContainer, @@ -465,12 +463,13 @@ func CreateTxProcessorWithOneSCExecutorMockVM( 
GasHandler: &testscommon.GasHandlerStub{ SetGasRefundedCalled: func(gasRefunded uint64, hash []byte) {}, }, - GasSchedule: gasScheduleNotifier, - TxLogsProcessor: &mock.TxLogsProcessorStub{}, - EnableEpochsHandler: enableEpochsHandler, - EnableRoundsHandler: enableRoundsHandler, - VMOutputCacher: txcache.NewDisabledCache(), - WasmVMChangeLocker: wasmVMChangeLocker, + GasSchedule: gasScheduleNotifier, + TxLogsProcessor: &mock.TxLogsProcessorStub{}, + EnableEpochsHandler: enableEpochsHandler, + EnableRoundsHandler: enableRoundsHandler, + VMOutputCacher: txcache.NewDisabledCache(), + WasmVMChangeLocker: wasmVMChangeLocker, + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } scProcessor, _ := processProxy.NewTestSmartContractProcessorProxy(argsNewSCProcessor, genericEpochNotifier) @@ -481,25 +480,27 @@ func CreateTxProcessorWithOneSCExecutorMockVM( } argsNewTxProcessor := transaction.ArgsNewTxProcessor{ - Accounts: accnts, - Hasher: integrationtests.TestHasher, - PubkeyConv: pubkeyConv, - Marshalizer: integrationtests.TestMarshalizer, - SignMarshalizer: integrationtests.TestMarshalizer, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), - ScProcessor: scProcessor, - TxFeeHandler: &testscommon.UnsignedTxHandlerStub{}, - TxTypeHandler: txTypeHandler, - EconomicsFee: economicsData, - ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, - BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, - ArgsParser: smartContract.NewArgumentParser(), - ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, - EnableRoundsHandler: enableRoundsHandler, - EnableEpochsHandler: enableEpochsHandler, - TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), - GuardianChecker: guardedAccountHandler, - TxLogsProcessor: &mock.TxLogsProcessorStub{}, + Accounts: accnts, + Hasher: integrationtests.TestHasher, + PubkeyConv: pubkeyConv, + Marshalizer: integrationtests.TestMarshalizer, + SignMarshalizer: integrationtests.TestMarshalizer, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), + ScProcessor: scProcessor, + TxFeeHandler: &testscommon.UnsignedTxHandlerStub{}, + TxTypeHandler: txTypeHandler, + EconomicsFee: economicsData, + ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, + BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, + ArgsParser: smartContract.NewArgumentParser(), + ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, + EnableRoundsHandler: enableRoundsHandler, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), + GuardianChecker: guardedAccountHandler, + TxLogsProcessor: &mock.TxLogsProcessorStub{}, + RelayedTxV3Processor: &processMocks.RelayedTxV3ProcessorMock{}, + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } return transaction.NewTxProcessor(argsNewTxProcessor) @@ -614,6 +615,7 @@ func CreateVMAndBlockchainHookAndDataPool( WasmVMChangeLocker: wasmVMChangeLocker, ESDTTransferParser: esdtTransferParser, Hasher: integrationtests.TestHasher, + PubKeyConverter: pubkeyConv, } vmFactory, err := shard.NewVMContainerFactory(argsNewVMFactory) if err != nil { @@ -690,7 +692,7 @@ func CreateVMAndBlockchainHookMeta( MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } - economicsData, err := createEconomicsData(config.EnableEpochs{}) + economicsData, err := createEconomicsData(config.EnableEpochs{}, 1) if err != nil { log.LogIfError(err) } @@ -702,7 +704,8 @@ func CreateVMAndBlockchainHookMeta( Economics: 
economicsData, MessageSignVerifier: &mock.MessageSignVerifierMock{}, GasSchedule: gasSchedule, - NodesConfigProvider: &mock.NodesSetupStub{}, + ArgBlockChainHook: args, + NodesConfigProvider: &genesisMocks.NodesSetupStub{}, Hasher: integrationtests.TestHasher, Marshalizer: integrationtests.TestMarshalizer, SystemSCConfig: createSystemSCConfig(), @@ -711,6 +714,7 @@ func CreateVMAndBlockchainHookMeta( ChanceComputer: &shardingMocks.NodesCoordinatorMock{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(1), EnableEpochsHandler: enableEpochsHandler, + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, } vmFactory, err := metachain.NewVMContainerFactory(argVMContainer) if err != nil { @@ -765,6 +769,8 @@ func createSystemSCConfig() *config.SystemSmartContractsConfig { BleedPercentagePerRound: 0.00001, MaxNumberOfNodesForStake: 36, ActivateBLSPubKeyMessageVerification: false, + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "1250000000000000000000", @@ -775,6 +781,12 @@ func createSystemSCConfig() *config.SystemSmartContractsConfig { MinServiceFee: 1, MaxServiceFee: 20, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, } } @@ -792,17 +804,19 @@ func CreateVMConfigWithVersion(version string) *config.VirtualMachineConfig { }, }, TimeOutForSCExecutionInMilliseconds: 10000, // 10 seconds + TransferAndExecuteByUserAddresses: []string{"3132333435363738393031323334353637383930313233343536373839303234"}, } } // ResultsCreateTxProcessor is the struct that will hold all needed processor instances type ResultsCreateTxProcessor struct { - TxProc process.TransactionProcessor - SCProc scrCommon.TestSmartContractProcessor - IntermediateTxProc process.IntermediateTransactionHandler - EconomicsHandler process.EconomicsDataHandler - CostHandler external.TransactionEvaluator - TxLogProc process.TransactionLogProcessor + TxProc process.TransactionProcessor + SCProc scrCommon.TestSmartContractProcessor + IntermediateTxProc process.IntermediateTransactionHandler + EconomicsHandler process.EconomicsDataHandler + CostHandler external.TransactionEvaluator + TxLogProc process.TransactionLogProcessor + FailedTxLogsAccumulator process.FailedTxLogsAccumulator } // CreateTxProcessorWithOneSCExecutorWithVMs - @@ -819,6 +833,8 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( epochNotifierInstance process.EpochNotifier, guardianChecker process.GuardianChecker, roundNotifierInstance process.RoundNotifier, + chainHandler data.ChainHandler, + gasPriceModifier float64, ) (*ResultsCreateTxProcessor, error) { if check.IfNil(poolsHolder) { poolsHolder = dataRetrieverMock.NewPoolsHolderMock() @@ -841,10 +857,11 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( gasSchedule := make(map[string]map[string]uint64) defaults.FillGasMapInternal(gasSchedule, 1) - economicsData, err := createEconomicsData(enableEpochsConfig) + economicsData, err := createEconomicsData(enableEpochsConfig, gasPriceModifier) if err != nil { return nil, err } + _ = economicsData.SetTxTypeHandler(txTypeHandler) gasComp, err := preprocess.NewGasComputation(economicsData, txTypeHandler, enableEpochsHandler) if err != nil { @@ -856,53 +873,58 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( Marshalizer: integrationtests.TestMarshalizer, }) + failedLogsAcc := transactionLog.NewFailedTxLogsAccumulator() + intermediateTxHandler := 
&mock.IntermediateTransactionHandlerMock{} argsNewSCProcessor := scrCommon.ArgsNewSmartContractProcessor{ - VmContainer: vmContainer, - ArgsParser: smartContract.NewArgumentParser(), - Hasher: integrationtests.TestHasher, - Marshalizer: integrationtests.TestMarshalizer, - AccountsDB: accnts, - BlockChainHook: blockChainHook, - BuiltInFunctions: blockChainHook.GetBuiltinFunctionsContainer(), - PubkeyConv: pubkeyConv, - ShardCoordinator: shardCoordinator, - ScrForwarder: intermediateTxHandler, - BadTxForwarder: intermediateTxHandler, - TxFeeHandler: feeAccumulator, - EconomicsFee: economicsData, - TxTypeHandler: txTypeHandler, - GasHandler: gasComp, - GasSchedule: mock.NewGasScheduleNotifierMock(gasSchedule), - TxLogsProcessor: logProc, - EnableRoundsHandler: enableRoundsHandler, - EnableEpochsHandler: enableEpochsHandler, - WasmVMChangeLocker: wasmVMChangeLocker, - VMOutputCacher: txcache.NewDisabledCache(), + VmContainer: vmContainer, + ArgsParser: smartContract.NewArgumentParser(), + Hasher: integrationtests.TestHasher, + Marshalizer: integrationtests.TestMarshalizer, + AccountsDB: accnts, + BlockChainHook: blockChainHook, + BuiltInFunctions: blockChainHook.GetBuiltinFunctionsContainer(), + PubkeyConv: pubkeyConv, + ShardCoordinator: shardCoordinator, + ScrForwarder: intermediateTxHandler, + BadTxForwarder: intermediateTxHandler, + TxFeeHandler: feeAccumulator, + EconomicsFee: economicsData, + TxTypeHandler: txTypeHandler, + GasHandler: gasComp, + GasSchedule: mock.NewGasScheduleNotifierMock(gasSchedule), + TxLogsProcessor: logProc, + EnableRoundsHandler: enableRoundsHandler, + EnableEpochsHandler: enableEpochsHandler, + WasmVMChangeLocker: wasmVMChangeLocker, + VMOutputCacher: txcache.NewDisabledCache(), + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } scProcessorProxy, _ := processProxy.NewTestSmartContractProcessorProxy(argsNewSCProcessor, epochNotifierInstance) argsNewTxProcessor := transaction.ArgsNewTxProcessor{ - Accounts: accnts, - Hasher: integrationtests.TestHasher, - PubkeyConv: pubkeyConv, - Marshalizer: integrationtests.TestMarshalizer, - SignMarshalizer: integrationtests.TestMarshalizer, - ShardCoordinator: shardCoordinator, - ScProcessor: scProcessorProxy, - TxFeeHandler: feeAccumulator, - TxTypeHandler: txTypeHandler, - EconomicsFee: economicsData, - ReceiptForwarder: intermediateTxHandler, - BadTxForwarder: intermediateTxHandler, - ArgsParser: smartContract.NewArgumentParser(), - ScrForwarder: intermediateTxHandler, - EnableRoundsHandler: enableRoundsHandler, - EnableEpochsHandler: enableEpochsHandler, - TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), - GuardianChecker: guardianChecker, - TxLogsProcessor: logProc, + Accounts: accnts, + Hasher: integrationtests.TestHasher, + PubkeyConv: pubkeyConv, + Marshalizer: integrationtests.TestMarshalizer, + SignMarshalizer: integrationtests.TestMarshalizer, + ShardCoordinator: shardCoordinator, + ScProcessor: scProcessorProxy, + TxFeeHandler: feeAccumulator, + TxTypeHandler: txTypeHandler, + EconomicsFee: economicsData, + ReceiptForwarder: intermediateTxHandler, + BadTxForwarder: intermediateTxHandler, + ArgsParser: smartContract.NewArgumentParser(), + ScrForwarder: intermediateTxHandler, + EnableRoundsHandler: enableRoundsHandler, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), + GuardianChecker: guardianChecker, + TxLogsProcessor: logProc, + FailedTxLogsAccumulator: failedLogsAcc, + RelayedTxV3Processor: 
&processMocks.RelayedTxV3ProcessorMock{}, } txProcessor, err := transaction.NewTxProcessor(argsNewTxProcessor) if err != nil { @@ -981,6 +1003,7 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( Marshalizer: integrationtests.TestMarshalizer, Hasher: integrationtests.TestHasher, DataFieldParser: dataFieldParser, + BlockChainHook: blockChainHook, } argsNewSCProcessor.VMOutputCacher = txSimulatorProcessorArgs.VMOutputCacher @@ -1007,6 +1030,7 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( Accounts: simulationAccountsDB, ShardCoordinator: shardCoordinator, EnableEpochsHandler: argsNewSCProcessor.EnableEpochsHandler, + BlockChain: chainHandler, } apiTransactionEvaluator, err := transactionEvaluator.NewAPITransactionEvaluator(argsTransactionEvaluator) if err != nil { @@ -1078,7 +1102,7 @@ func CreatePreparedTxProcessorAndAccountsWithVMs( senderAddressBytes, senderBalance, enableEpochsConfig, - integrationTests.GetDefaultRoundsConfig()) + testscommon.GetDefaultRoundsConfig()) } // CreatePreparedTxProcessorAndAccountsWithVMsWithRoundsConfig - @@ -1089,7 +1113,7 @@ func CreatePreparedTxProcessorAndAccountsWithVMsWithRoundsConfig( enableEpochsConfig config.EnableEpochs, roundsConfig config.RoundConfig, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() _, _ = CreateAccount(accounts, senderAddressBytes, senderNonce, senderBalance) vmConfig := createDefaultVMConfig() @@ -1129,6 +1153,8 @@ func CreatePreparedTxProcessorAndAccountsWithVMsWithRoundsConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, + 1, ) if err != nil { return nil, err @@ -1162,36 +1188,48 @@ func createMockGasScheduleNotifierWithCustomGasSchedule(updateGasSchedule func(g } // CreatePreparedTxProcessorWithVMs - -func CreatePreparedTxProcessorWithVMs(enableEpochs config.EnableEpochs) (*VMTestContext, error) { - return CreatePreparedTxProcessorWithVMsAndCustomGasSchedule(enableEpochs, func(gasMap wasmConfig.GasScheduleMap) {}) +func CreatePreparedTxProcessorWithVMs(enableEpochs config.EnableEpochs, gasPriceModifier float64) (*VMTestContext, error) { + return CreatePreparedTxProcessorWithVMsAndCustomGasSchedule(enableEpochs, func(gasMap wasmConfig.GasScheduleMap) {}, gasPriceModifier) } // CreatePreparedTxProcessorWithVMsAndCustomGasSchedule - func CreatePreparedTxProcessorWithVMsAndCustomGasSchedule( enableEpochs config.EnableEpochs, - updateGasSchedule func(gasMap wasmConfig.GasScheduleMap)) (*VMTestContext, error) { + updateGasSchedule func(gasMap wasmConfig.GasScheduleMap), + gasPriceModifier float64) (*VMTestContext, error) { return CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGasAndRoundConfig( enableEpochs, mock.NewMultiShardsCoordinatorMock(2), integrationtests.CreateMemUnit(), createMockGasScheduleNotifierWithCustomGasSchedule(updateGasSchedule), - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), + gasPriceModifier, ) } // CreatePreparedTxProcessorWithVMsWithShardCoordinator - -func CreatePreparedTxProcessorWithVMsWithShardCoordinator(enableEpochsConfig config.EnableEpochs, shardCoordinator sharding.Coordinator) (*VMTestContext, error) { - return CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig(enableEpochsConfig, integrationTests.GetDefaultRoundsConfig(), shardCoordinator) +func CreatePreparedTxProcessorWithVMsWithShardCoordinator( + enableEpochsConfig config.EnableEpochs, + 
shardCoordinator sharding.Coordinator, + gasPriceModifier float64, +) (*VMTestContext, error) { + return CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig(enableEpochsConfig, testscommon.GetDefaultRoundsConfig(), shardCoordinator, gasPriceModifier) } // CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig - -func CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig(enableEpochsConfig config.EnableEpochs, roundsConfig config.RoundConfig, shardCoordinator sharding.Coordinator) (*VMTestContext, error) { +func CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig( + enableEpochsConfig config.EnableEpochs, + roundsConfig config.RoundConfig, + shardCoordinator sharding.Coordinator, + gasPriceModifier float64, +) (*VMTestContext, error) { return CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGasAndRoundConfig( enableEpochsConfig, shardCoordinator, integrationtests.CreateMemUnit(), CreateMockGasScheduleNotifier(), roundsConfig, + gasPriceModifier, ) } @@ -1201,6 +1239,7 @@ func CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( shardCoordinator sharding.Coordinator, db storage.Storer, gasScheduleNotifier core.GasScheduleNotifier, + gasPriceModifier float64, ) (*VMTestContext, error) { vmConfig := createDefaultVMConfig() return CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundConfig( @@ -1208,8 +1247,9 @@ func CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( shardCoordinator, db, gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, + gasPriceModifier, ) } @@ -1220,6 +1260,7 @@ func CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGasAndRoundConfig( db storage.Storer, gasScheduleNotifier core.GasScheduleNotifier, roundsConfig config.RoundConfig, + gasPriceModifier float64, ) (*VMTestContext, error) { vmConfig := createDefaultVMConfig() return CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundConfig( @@ -1229,6 +1270,7 @@ func CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGasAndRoundConfig( gasScheduleNotifier, roundsConfig, vmConfig, + gasPriceModifier, ) } @@ -1240,8 +1282,9 @@ func CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundCo gasScheduleNotifier core.GasScheduleNotifier, roundsConfig config.RoundConfig, vmConfig *config.VirtualMachineConfig, + gasPriceModifier float64, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() epochNotifierInstance := forking.NewGenericEpochNotifier() enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifierInstance) accounts := integrationtests.CreateAccountsDB(db, enableEpochsHandler) @@ -1280,29 +1323,32 @@ func CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundCo epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, + gasPriceModifier, ) if err != nil { return nil, err } return &VMTestContext{ - TxProcessor: res.TxProc, - ScProcessor: res.SCProc, - Accounts: accounts, - BlockchainHook: blockchainHook, - VMContainer: vmContainer, - TxFeeHandler: feeAccumulator, - ScForwarder: res.IntermediateTxProc, - ShardCoordinator: shardCoordinator, - EconomicsData: res.EconomicsHandler, - TxCostHandler: res.CostHandler, - TxsLogsProcessor: res.TxLogProc, - GasSchedule: gasScheduleNotifier, - EpochNotifier: epochNotifierInstance, - EnableEpochsHandler: 
enableEpochsHandler, - ChainHandler: chainHandler, - Marshalizer: integrationtests.TestMarshalizer, - GuardedAccountsHandler: guardedAccountHandler, + TxProcessor: res.TxProc, + ScProcessor: res.SCProc, + Accounts: accounts, + BlockchainHook: blockchainHook, + VMContainer: vmContainer, + TxFeeHandler: feeAccumulator, + ScForwarder: res.IntermediateTxProc, + ShardCoordinator: shardCoordinator, + EconomicsData: res.EconomicsHandler, + TxCostHandler: res.CostHandler, + TxsLogsProcessor: res.TxLogProc, + FailedTxLogsAccumulator: res.FailedTxLogsAccumulator, + GasSchedule: gasScheduleNotifier, + EpochNotifier: epochNotifierInstance, + EnableEpochsHandler: enableEpochsHandler, + ChainHandler: chainHandler, + Marshalizer: integrationtests.TestMarshalizer, + GuardedAccountsHandler: guardedAccountHandler, }, nil } @@ -1320,7 +1366,7 @@ func CreateTxProcessorWasmVMWithGasSchedule( senderBalance, gasScheduleMap, enableEpochsConfig, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) } @@ -1333,7 +1379,7 @@ func CreateTxProcessorArwenVMWithGasScheduleAndRoundConfig( enableEpochsConfig config.EnableEpochs, roundsConfig config.RoundConfig, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() _, _ = CreateAccount(accounts, senderAddressBytes, senderNonce, senderBalance) vmConfig := createDefaultVMConfig() @@ -1375,6 +1421,8 @@ func CreateTxProcessorArwenVMWithGasScheduleAndRoundConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, + 1, ) if err != nil { return nil, err @@ -1404,7 +1452,7 @@ func CreateTxProcessorWasmVMWithVMConfig( ) (*VMTestContext, error) { return CreateTxProcessorArwenWithVMConfigAndRoundConfig( enableEpochsConfig, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, gasSchedule, ) @@ -1417,7 +1465,7 @@ func CreateTxProcessorArwenWithVMConfigAndRoundConfig( vmConfig *config.VirtualMachineConfig, gasSchedule map[string]map[string]uint64, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() wasmVMChangeLocker := &sync.RWMutex{} gasScheduleNotifier := mock.NewGasScheduleNotifierMock(gasSchedule) @@ -1456,6 +1504,8 @@ func CreateTxProcessorArwenWithVMConfigAndRoundConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, + 1, ) if err != nil { return nil, err @@ -1493,7 +1543,7 @@ func CreatePreparedTxProcessorAndAccountsWithMockedVM( senderAddressBytes, senderBalance, enableEpochs, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), wasmVMChangeLocker, ) } @@ -1823,13 +1873,13 @@ func GetNodeIndex(nodeList []*integrationTests.TestProcessorNode, node *integrat } // CreatePreparedTxProcessorWithVMsMultiShard - -func CreatePreparedTxProcessorWithVMsMultiShard(selfShardID uint32, enableEpochsConfig config.EnableEpochs) (*VMTestContext, error) { - return CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(selfShardID, enableEpochsConfig, integrationTests.GetDefaultRoundsConfig()) +func CreatePreparedTxProcessorWithVMsMultiShard(selfShardID uint32, enableEpochsConfig config.EnableEpochs, gasPriceModifier float64) (*VMTestContext, error) { + return CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(selfShardID, 
enableEpochsConfig, testscommon.GetDefaultRoundsConfig(), gasPriceModifier) } // CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig - -func CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(selfShardID uint32, enableEpochsConfig config.EnableEpochs, roundsConfig config.RoundConfig) (*VMTestContext, error) { - return CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig(selfShardID, enableEpochsConfig, roundsConfig, createDefaultVMConfig()) +func CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(selfShardID uint32, enableEpochsConfig config.EnableEpochs, roundsConfig config.RoundConfig, gasPriceModifier float64) (*VMTestContext, error) { + return CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig(selfShardID, enableEpochsConfig, roundsConfig, createDefaultVMConfig(), gasPriceModifier) } // CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig - @@ -1838,10 +1888,11 @@ func CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( enableEpochsConfig config.EnableEpochs, roundsConfig config.RoundConfig, vmConfig *config.VirtualMachineConfig, + gasPriceModifier float64, ) (*VMTestContext, error) { shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, selfShardID) - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() wasmVMChangeLocker := &sync.RWMutex{} @@ -1886,27 +1937,30 @@ func CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, + gasPriceModifier, ) if err != nil { return nil, err } return &VMTestContext{ - TxProcessor: res.TxProc, - ScProcessor: res.SCProc, - Accounts: accounts, - BlockchainHook: blockchainHook, - VMContainer: vmContainer, - TxFeeHandler: feeAccumulator, - ShardCoordinator: shardCoordinator, - ScForwarder: res.IntermediateTxProc, - EconomicsData: res.EconomicsHandler, - Marshalizer: integrationtests.TestMarshalizer, - TxsLogsProcessor: res.TxLogProc, - EpochNotifier: epochNotifierInstance, - EnableEpochsHandler: enableEpochsHandler, - ChainHandler: chainHandler, - GuardedAccountsHandler: guardedAccountHandler, + TxProcessor: res.TxProc, + ScProcessor: res.SCProc, + Accounts: accounts, + BlockchainHook: blockchainHook, + VMContainer: vmContainer, + TxFeeHandler: feeAccumulator, + ShardCoordinator: shardCoordinator, + ScForwarder: res.IntermediateTxProc, + EconomicsData: res.EconomicsHandler, + Marshalizer: integrationtests.TestMarshalizer, + TxsLogsProcessor: res.TxLogProc, + FailedTxLogsAccumulator: res.FailedTxLogsAccumulator, + EpochNotifier: epochNotifierInstance, + EnableEpochsHandler: enableEpochsHandler, + ChainHandler: chainHandler, + GuardedAccountsHandler: guardedAccountHandler, }, nil } diff --git a/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go b/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go index e5b6661d02e..8f3894aa319 100644 --- a/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go +++ b/integrationTests/vm/txsFee/apiTransactionEvaluator_test.go @@ -27,12 +27,10 @@ func getZeroGasAndFees() scheduled.GasAndFees { func TestSCCallCostTransactionCost(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ - DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + testContext, err := 
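// The trailing "1" passed below is the new gasPriceModifier argument threaded through the
// Create* test helpers above; 1 preserves the flat, legacy gas pricing, while common.go in
// this package also declares a gasPriceModifier = 0.1 constant for tests that want a reduced
// rate. A rough, illustrative sketch of the intended effect (made-up gas figures; only the
// shape of the computation matters: the processing part is charged at the modified price):
//
//	gasPrice := uint64(10)
//	moveBalanceGas := uint64(1_000) // charged at the full gas price
//	processingGas := uint64(50_000) // charged at gasPrice scaled by the modifier
//	feeModifier1 := gasPrice*moveBalanceGas + gasPrice*processingGas                       // 510_000
//	feeModifier01 := gasPrice*moveBalanceGas + uint64(float64(gasPrice)*0.1)*processingGas // 60_000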
vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -40,8 +38,8 @@ func TestSCCallCostTransactionCost(t *testing.T) { utils.CleanAccumulatedIntermediateTransactions(t, testContext) sndAddr := []byte("12345678901234567890123456789112") - senderBalance := big.NewInt(100000) - gasLimit := uint64(1000) + senderBalance := big.NewInt(100000000000) + gasLimit := uint64(10000000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -49,15 +47,15 @@ func TestSCCallCostTransactionCost(t *testing.T) { res, err := testContext.TxCostHandler.ComputeTransactionGasLimit(tx) require.Nil(t, err) - require.Equal(t, uint64(418), res.GasUnits) + require.Equal(t, uint64(15704), res.GasUnits) } func TestScDeployTransactionCost(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -74,10 +72,10 @@ func TestScDeployTransactionCost(t *testing.T) { func TestAsyncCallsTransactionCost(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -105,13 +103,13 @@ func TestAsyncCallsTransactionCost(t *testing.T) { func TestBuiltInFunctionTransactionCost(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs( config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, 1) require.Nil(t, err) defer testContext.Close() @@ -131,10 +129,10 @@ func TestBuiltInFunctionTransactionCost(t *testing.T) { func TestESDTTransfer(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -154,12 +152,12 @@ func TestESDTTransfer(t *testing.T) { func TestAsyncESDTTransfer(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, 1) require.Nil(t, err) defer testContext.Close() @@ -194,5 +192,5 @@ func TestAsyncESDTTransfer(t *testing.T) { res, err := testContext.TxCostHandler.ComputeTransactionGasLimit(tx) require.Nil(t, err) - require.Equal(t, uint64(34157), res.GasUnits) + require.Equal(t, uint64(177653), res.GasUnits) } diff --git a/integrationTests/vm/txsFee/asyncCall_multi_test.go b/integrationTests/vm/txsFee/asyncCall_multi_test.go index 289f440efa3..56a6dc02a26 100644 --- a/integrationTests/vm/txsFee/asyncCall_multi_test.go +++ b/integrationTests/vm/txsFee/asyncCall_multi_test.go @@ -1,5 +1,3 @@ -//go:build !race - package txsFee import ( @@ -9,7 +7,6 @@ import 
( "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/state" @@ -23,7 +20,11 @@ var egldBalance = big.NewInt(50000000000) var esdtBalance = big.NewInt(100) func TestAsyncCallLegacy(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -66,7 +67,11 @@ func TestAsyncCallLegacy(t *testing.T) { } func TestAsyncCallMulti(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -113,7 +118,11 @@ func TestAsyncCallMulti(t *testing.T) { } func TestAsyncCallTransferAndExecute(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -164,13 +173,17 @@ func TestAsyncCallTransferAndExecute(t *testing.T) { } func TestAsyncCallTransferESDTAndExecute_Success(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + numberOfCallsFromParent := 3 numberOfBackTransfers := 2 transferESDTAndExecute(t, numberOfCallsFromParent, numberOfBackTransfers) } func transferESDTAndExecute(t *testing.T, numberOfCallsFromParent int, numberOfBackTransfers int) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -280,15 +293,19 @@ func deployForwarderAndTestContract( } func TestAsyncCallMulti_CrossShard(t *testing.T) { - testContextFirstContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextFirstContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextFirstContract.Close() - testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextSecondContract.Close() - testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) + testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextSender.Close() @@ -366,15 +383,19 @@ func TestAsyncCallMulti_CrossShard(t *testing.T) { } func TestAsyncCallTransferAndExecute_CrossShard(t *testing.T) { - childShard, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + 
childShard, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer childShard.Close() - forwarderShard, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + forwarderShard, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer forwarderShard.Close() - testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) + testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextSender.Close() @@ -448,27 +469,25 @@ func TestAsyncCallTransferAndExecute_CrossShard(t *testing.T) { } func TestAsyncCallTransferESDTAndExecute_CrossShard_Success(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + numberOfCallsFromParent := 3 numberOfBackTransfers := 2 transferESDTAndExecuteCrossShard(t, numberOfCallsFromParent, numberOfBackTransfers) } func transferESDTAndExecuteCrossShard(t *testing.T, numberOfCallsFromParent int, numberOfBackTransfers int) { - vaultShard, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{ - DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + vaultShard, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer vaultShard.Close() - forwarderShard, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{ - DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + forwarderShard, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer forwarderShard.Close() - testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{ - DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextSender.Close() @@ -513,7 +532,7 @@ func transferESDTAndExecuteCrossShard(t *testing.T, numberOfCallsFromParent int, Clear(). Func("add_queued_call_transfer_esdt"). Bytes(vaultSCAddress). - Int64(50000). + Int64(500000). Bytes([]byte("retrieve_funds_promises")). Bytes(esdtToken). Int64(esdtToTransferFromParent). @@ -532,7 +551,7 @@ func transferESDTAndExecuteCrossShard(t *testing.T, numberOfCallsFromParent int, Clear(). 
Func("forward_queued_calls") - gasLimit = uint64(50000000) + gasLimit = uint64(100000000) sendTx(nonce, senderAddr, forwarderSCAddress, gasPrice, gasLimit, txBuilderRunQueue, forwarderShard, t) intermediateTxs := forwarderShard.GetIntermediateTransactions(t) diff --git a/integrationTests/vm/txsFee/asyncCall_test.go b/integrationTests/vm/txsFee/asyncCall_test.go index cedf9ad825b..88057f564a7 100644 --- a/integrationTests/vm/txsFee/asyncCall_test.go +++ b/integrationTests/vm/txsFee/asyncCall_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -22,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" vmcommon "github.com/multiversx/mx-chain-vm-common-go" wasmConfig "github.com/multiversx/mx-chain-vm-go/config" @@ -32,7 +29,11 @@ import ( const upgradeContractFunction = "upgradeContract" func TestAsyncCallShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -84,12 +85,16 @@ func TestAsyncCallShouldWork(t *testing.T) { } func TestMinterContractWithAsyncCalls(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsAndCustomGasSchedule(config.EnableEpochs{}, func(gasMap wasmConfig.GasScheduleMap) { // if `MaxBuiltInCallsPerTx` is 200 test will fail gasMap[common.MaxPerTransaction]["MaxBuiltInCallsPerTx"] = 199 gasMap[common.MaxPerTransaction]["MaxNumberOfTransfersPerTx"] = 100000 gasMap[common.MaxPerTransaction]["MaxNumberOfTrieReadsPerTx"] = 100000 - }) + }, 1) require.Nil(t, err) defer testContext.Close() @@ -140,6 +145,10 @@ func TestMinterContractWithAsyncCalls(t *testing.T) { } func TestAsyncCallsOnInitFunctionOnUpgrade(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + firstContractCode := wasm.GetSCCode("./testdata/first/output/first.wasm") newContractCode := wasm.GetSCCode("./testdata/asyncOnInit/asyncOnInitAndUpgrade.wasm") @@ -191,8 +200,9 @@ func testAsyncCallsOnInitFunctionOnUpgrade( shardCoordinatorForShard1, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), + 1, ) require.Nil(t, err) testContextShardMeta, err := vm.CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundConfig( @@ -200,8 +210,9 @@ func testAsyncCallsOnInitFunctionOnUpgrade( shardCoordinatorForShardMeta, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), + 1, ) require.Nil(t, err) @@ -275,6 +286,10 @@ func testAsyncCallsOnInitFunctionOnUpgrade( } func TestAsyncCallsOnInitFunctionOnDeploy(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + firstSCCode := wasm.GetSCCode("./testdata/first/output/first.wasm") pathToSecondSC 
:= "./testdata/asyncOnInit/asyncOnInitAndUpgrade.wasm" secondSCCode := wasm.GetSCCode(pathToSecondSC) @@ -325,8 +340,9 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, shardCoordinatorForShard1, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), + 1, ) require.Nil(t, err) testContextShardMeta, err := vm.CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundConfig( @@ -334,8 +350,9 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, shardCoordinatorForShardMeta, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), + 1, ) require.Nil(t, err) diff --git a/integrationTests/vm/txsFee/asyncESDT_test.go b/integrationTests/vm/txsFee/asyncESDT_test.go index 2c2dfce4c71..c7c8d088fb9 100644 --- a/integrationTests/vm/txsFee/asyncESDT_test.go +++ b/integrationTests/vm/txsFee/asyncESDT_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -25,9 +21,13 @@ import ( ) func TestAsyncESDTCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, 1) require.Nil(t, err) defer testContext.Close() @@ -70,16 +70,20 @@ func TestAsyncESDTCallShouldWork(t *testing.T) { utils.CheckESDTBalance(t, testContext, firstSCAddress, token, big.NewInt(2500)) utils.CheckESDTBalance(t, testContext, secondSCAddress, token, big.NewInt(2500)) - expectedSenderBalance := big.NewInt(95000000) + expectedSenderBalance := big.NewInt(98223470) utils.TestAccount(t, testContext.Accounts, sndAddr, 1, expectedSenderBalance) - expectedAccumulatedFees := big.NewInt(5000000) + expectedAccumulatedFees := big.NewInt(1776530) accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() require.Equal(t, expectedAccumulatedFees, accumulatedFees) } func TestAsyncESDTCallSecondScRefusesPayment(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -132,7 +136,11 @@ func TestAsyncESDTCallSecondScRefusesPayment(t *testing.T) { } func TestAsyncESDTCallsOutOfGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -184,9 +192,13 @@ func TestAsyncESDTCallsOutOfGas(t *testing.T) { } func TestAsyncMultiTransferOnCallback(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, 1) require.Nil(t, err) defer testContext.Close() @@ -279,7 +291,11 @@ func TestAsyncMultiTransferOnCallback(t *testing.T) { } func TestAsyncMultiTransferOnCallAndOnCallback(t *testing.T) { - testContext, err := 
vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -379,7 +395,11 @@ func TestAsyncMultiTransferOnCallAndOnCallback(t *testing.T) { } func TestSendNFTToContractWith0Function(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -428,7 +448,11 @@ func TestSendNFTToContractWith0Function(t *testing.T) { } func TestSendNFTToContractWith0FunctionNonPayable(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -478,7 +502,11 @@ func TestSendNFTToContractWith0FunctionNonPayable(t *testing.T) { } func TestAsyncESDTCallForThirdContractShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/backwardsCompatibility_test.go b/integrationTests/vm/txsFee/backwardsCompatibility_test.go index b4a73596edb..424594c6754 100644 --- a/integrationTests/vm/txsFee/backwardsCompatibility_test.go +++ b/integrationTests/vm/txsFee/backwardsCompatibility_test.go @@ -17,13 +17,16 @@ import ( // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenAllFlagsAreDisabled(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ - PenalizedTooMuchGasEnableEpoch: 100, - BuiltInFunctionOnMetaEnableEpoch: 100, - SCDeployEnableEpoch: 100, - MetaProtectionEnableEpoch: 100, - RelayedTransactionsEnableEpoch: 100, - }) + PenalizedTooMuchGasEnableEpoch: 100, + SCDeployEnableEpoch: 100, + MetaProtectionEnableEpoch: 100, + RelayedTransactionsEnableEpoch: 100, + }, 1) require.Nil(t, err) defer testContext.Close() @@ -58,13 +61,17 @@ func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenAllFlagsAreDisabled(t *test // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceAllFlagsDisabledLessBalanceThanGasLimitMulGasPrice(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, BuiltInFunctionsEnableEpoch: integrationTests.UnreachableEpoch, SCDeployEnableEpoch: integrationTests.UnreachableEpoch, MetaProtectionEnableEpoch: integrationTests.UnreachableEpoch, RelayedTransactionsEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, 1) require.Nil(t, err) defer testContext.Close() @@ -81,6 +88,10 @@ func TestMoveBalanceAllFlagsDisabledLessBalanceThanGasLimitMulGasPrice(t *testin } func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenSomeFlagsAreDisabled(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short 
test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs( config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: 0, @@ -88,7 +99,7 @@ func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenSomeFlagsAreDisabled(t *tes SCDeployEnableEpoch: 100, MetaProtectionEnableEpoch: 100, RelayedTransactionsV2EnableEpoch: 100, - }) + }, 1) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/builtInFunctions_test.go b/integrationTests/vm/txsFee/builtInFunctions_test.go index 6a9b31bb674..0c7c1f7cdf3 100644 --- a/integrationTests/vm/txsFee/builtInFunctions_test.go +++ b/integrationTests/vm/txsFee/builtInFunctions_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -20,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" @@ -27,15 +24,19 @@ import ( ) func TestBuildInFunctionChangeOwnerCallShouldWorkV1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs( config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, - }) + }, 1) require.Nil(t, err) defer testContext.Close() - scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) utils.CleanAccumulatedIntermediateTransactions(t, testContext) @@ -53,7 +54,7 @@ func TestBuildInFunctionChangeOwnerCallShouldWorkV1(t *testing.T) { utils.CheckOwnerAddr(t, testContext, scAddress, newOwner) - expectedBalance := big.NewInt(87250) + expectedBalance := big.NewInt(9987250) vm.TestAccount(t, testContext.Accounts, owner, 2, expectedBalance) // check accumulated fees @@ -65,14 +66,18 @@ func TestBuildInFunctionChangeOwnerCallShouldWorkV1(t *testing.T) { } func TestBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs( config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, 1) require.Nil(t, err) defer testContext.Close() - scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) utils.CleanAccumulatedIntermediateTransactions(t, testContext) @@ -90,7 +95,7 @@ func TestBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { utils.CheckOwnerAddr(t, testContext, scAddress, newOwner) - expectedBalance := big.NewInt(78100) + expectedBalance := big.NewInt(9978100) vm.TestAccount(t, testContext.Accounts, owner, 2, expectedBalance) // check accumulated fees @@ -102,11 +107,15 @@ func TestBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) 
{ } func TestBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() - scAddress, initialOwner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, initialOwner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) utils.CleanAccumulatedIntermediateTransactions(t, testContext) testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) @@ -139,11 +148,15 @@ func TestBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *testing.T) } func TestBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() - scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) utils.CleanAccumulatedIntermediateTransactions(t, testContext) testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) @@ -161,7 +174,7 @@ func TestBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *testing.T) utils.CheckOwnerAddr(t, testContext, scAddress, owner) - expectedBalance := big.NewInt(78100) + expectedBalance := big.NewInt(9978100) vm.TestAccount(t, testContext.Accounts, owner, 2, expectedBalance) // check accumulated fees @@ -173,11 +186,15 @@ func TestBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *testing.T) } func TestBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() - scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) newOwner := []byte("12345678901234567890123456789112") @@ -197,7 +214,7 @@ func TestBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldNotConsumeGas(t utils.CheckOwnerAddr(t, testContext, scAddress, owner) - expectedBalance := big.NewInt(99070) + expectedBalance := big.NewInt(9999070) vm.TestAccount(t, testContext.Accounts, owner, 2, expectedBalance) // check accumulated fees @@ -209,11 +226,15 @@ func TestBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldNotConsumeGas(t } func TestBuildInFunctionChangeOwnerOutOfGasShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() - scAddress, owner := utils.DoDeploy(t, testContext, 
"../wasm/testdata/counter/output/counter.wasm") + scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) utils.CleanAccumulatedIntermediateTransactions(t, testContext) testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) @@ -232,7 +253,7 @@ func TestBuildInFunctionChangeOwnerOutOfGasShouldConsumeGas(t *testing.T) { utils.CheckOwnerAddr(t, testContext, scAddress, owner) - expectedBalance := big.NewInt(87260) + expectedBalance := big.NewInt(9987260) vm.TestAccount(t, testContext.Accounts, owner, 2, expectedBalance) // check accumulated fees @@ -244,13 +265,17 @@ func TestBuildInFunctionChangeOwnerOutOfGasShouldConsumeGas(t *testing.T) { } func TestBuildInFunctionSaveKeyValue_WrongDestination(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + shardCoord, _ := sharding.NewMultiShardCoordinator(2, 0) testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinator( config.EnableEpochs{ CleanUpInformativeSCRsEnableEpoch: integrationTests.UnreachableEpoch, SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, - }, shardCoord) + }, shardCoord, 1) require.Nil(t, err) defer testContext.Close() @@ -279,12 +304,16 @@ func TestBuildInFunctionSaveKeyValue_WrongDestination(t *testing.T) { } func TestBuildInFunctionSaveKeyValue_NotEnoughGasFor3rdSave(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + shardCoord, _ := sharding.NewMultiShardCoordinator(2, 0) testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinator( config.EnableEpochs{ BackwardCompSaveKeyValueEnableEpoch: 5, - }, shardCoord) + }, shardCoord, 1) require.Nil(t, err) defer testContext.Close() @@ -307,6 +336,10 @@ func TestBuildInFunctionSaveKeyValue_NotEnoughGasFor3rdSave(t *testing.T) { } func TestBuildInFunctionSaveKeyValue_NotEnoughGasForTheSameKeyValue(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + shardCoord, _ := sharding.NewMultiShardCoordinator(2, 0) gasScheduleNotifier := vm.CreateMockGasScheduleNotifier() @@ -321,8 +354,9 @@ func TestBuildInFunctionSaveKeyValue_NotEnoughGasForTheSameKeyValue(t *testing.T shardCoord, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.5"), + 1, ) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/common.go b/integrationTests/vm/txsFee/common.go index 02e69b5260d..774af8202d2 100644 --- a/integrationTests/vm/txsFee/common.go +++ b/integrationTests/vm/txsFee/common.go @@ -1,14 +1,146 @@ package txsFee import ( + "bytes" + "encoding/hex" + "math/big" "testing" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/esdt" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/state" "github.com/stretchr/testify/require" ) +const ( + gasPrice = uint64(10) + minGasLimit = uint64(1) + gasPriceModifier = float64(0.1) +) + +// MetaData defines test meta data struct +type MetaData struct { + TokenId []byte + Nonce []byte + Name []byte + Royalties []byte + Hash []byte + Attributes []byte + Uris [][]byte +} + +// GetDefaultMetaData will return default meta data 
structure +func GetDefaultMetaData() *MetaData { + return &MetaData{ + TokenId: []byte(hex.EncodeToString([]byte("tokenId"))), + Nonce: []byte(hex.EncodeToString(big.NewInt(0).Bytes())), + Name: []byte(hex.EncodeToString([]byte("name"))), + Royalties: []byte(hex.EncodeToString(big.NewInt(10).Bytes())), + Hash: []byte(hex.EncodeToString([]byte("hash"))), + Attributes: []byte(hex.EncodeToString([]byte("attributes"))), + Uris: [][]byte{[]byte(hex.EncodeToString([]byte("uri1"))), []byte(hex.EncodeToString([]byte("uri2"))), []byte(hex.EncodeToString([]byte("uri3")))}, + } +} + +func getMetaDataFromAcc(t *testing.T, testContext *vm.VMTestContext, accWithMetaData []byte, token []byte) *esdt.MetaData { + account, err := testContext.Accounts.LoadAccount(accWithMetaData) + require.Nil(t, err) + userAccount, ok := account.(state.UserAccountHandler) + require.True(t, ok) + + key := append(token, big.NewInt(0).SetUint64(1).Bytes()...) + esdtDataBytes, _, err := userAccount.RetrieveValue(key) + require.Nil(t, err) + esdtData := &esdt.ESDigitalToken{} + err = testContext.Marshalizer.Unmarshal(esdtData, esdtDataBytes) + require.Nil(t, err) + + return esdtData.TokenMetaData +} + +func checkMetaData(t *testing.T, testContext *vm.VMTestContext, accWithMetaData []byte, token []byte, expectedMetaData *MetaData) { + retrievedMetaData := getMetaDataFromAcc(t, testContext, accWithMetaData, token) + + require.Equal(t, expectedMetaData.Nonce, []byte(hex.EncodeToString(big.NewInt(int64(retrievedMetaData.Nonce)).Bytes()))) + require.Equal(t, expectedMetaData.Name, []byte(hex.EncodeToString(retrievedMetaData.Name))) + require.Equal(t, expectedMetaData.Royalties, []byte(hex.EncodeToString(big.NewInt(int64(retrievedMetaData.Royalties)).Bytes()))) + require.Equal(t, expectedMetaData.Hash, []byte(hex.EncodeToString(retrievedMetaData.Hash))) + for i, uri := range expectedMetaData.Uris { + require.Equal(t, uri, []byte(hex.EncodeToString(retrievedMetaData.URIs[i]))) + } + require.Equal(t, expectedMetaData.Attributes, []byte(hex.EncodeToString(retrievedMetaData.Attributes))) +} + +func getDynamicTokenTypes() []string { + return []string{ + core.DynamicNFTESDT, + core.DynamicSFTESDT, + core.DynamicMetaESDT, + } +} + +func createTokenTx( + sndAddr []byte, + rcvAddr []byte, + gasLimit uint64, + quantity int64, + metaData *MetaData, +) *transaction.Transaction { + txDataField := bytes.Join( + [][]byte{ + []byte(core.BuiltInFunctionESDTNFTCreate), + metaData.TokenId, + []byte(hex.EncodeToString(big.NewInt(quantity).Bytes())), // quantity + metaData.Name, + metaData.Royalties, + metaData.Hash, + metaData.Attributes, + []byte(hex.EncodeToString([]byte("uri"))), + }, + []byte("@"), + ) + + return &transaction.Transaction{ + Nonce: 0, + SndAddr: sndAddr, + RcvAddr: rcvAddr, + GasLimit: gasLimit, + GasPrice: gasPrice, + Data: txDataField, + Value: big.NewInt(0), + } +} + +func setTokenTypeTx( + sndAddr []byte, + gasLimit uint64, + tokenId []byte, + tokenType string, +) *transaction.Transaction { + txDataField := bytes.Join( + [][]byte{ + []byte(core.ESDTSetTokenType), + []byte(hex.EncodeToString(tokenId)), + []byte(hex.EncodeToString([]byte(tokenType))), + }, + []byte("@"), + ) + + return &transaction.Transaction{ + Nonce: 0, + SndAddr: sndAddr, + RcvAddr: core.SystemAccountAddress, + GasLimit: gasLimit, + GasPrice: gasPrice, + + Data: txDataField, + Value: big.NewInt(0), + } +} + func getAccount(tb testing.TB, testContext *vm.VMTestContext, scAddress []byte) state.UserAccountHandler { scAcc, err := 
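// For reference: createTokenTx and setTokenTypeTx above assemble built-in function calls in
// the usual "function@hexArg1@hexArg2@..." data-field layout, with the function name in plain
// text and every argument hex-encoded. With GetDefaultMetaData() and quantity 1, createTokenTx
// yields the following Data field (derived from the literals above; shown only as an example):
//
//	ESDTNFTCreate@746f6b656e4964@01@6e616d65@0a@68617368@61747472696275746573@757269
//
// i.e. tokenId, quantity, name, royalties, hash, attributes and a single "uri" argument, while
// setTokenTypeTx sends ESDTSetTokenType@<hex tokenId>@<hex tokenType> to the system account
// from the ESDT system smart contract address.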
testContext.Accounts.LoadAccount(scAddress) require.Nil(tb, err) @@ -25,3 +157,21 @@ func getAccountDataTrie(tb testing.TB, testContext *vm.VMTestContext, address [] return dataTrieInstance } + +func createAccWithBalance(t *testing.T, accnts state.AccountsAdapter, pubKey []byte, egldValue *big.Int) { + account, err := accnts.LoadAccount(pubKey) + require.Nil(t, err) + + userAccount, ok := account.(state.UserAccountHandler) + require.True(t, ok) + + userAccount.IncreaseNonce(0) + err = userAccount.AddToBalance(egldValue) + require.Nil(t, err) + + err = accnts.SaveAccount(userAccount) + require.Nil(t, err) + + _, err = accnts.Commit() + require.Nil(t, err) +} diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 53c6644b679..c8787d99db5 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -19,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" @@ -28,9 +25,13 @@ import ( const returnOkData = "@6f6b" func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: 10, - }) + }, 1) require.Nil(t, err) defer testContext.Close() @@ -55,13 +56,13 @@ func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( require.Equal(t, vmcommon.Ok, retCode) require.Nil(t, err) - vm.TestAccount(t, testContext.Accounts, sndAddr, 1, big.NewInt(9721810)) + vm.TestAccount(t, testContext.Accounts, sndAddr, 1, big.NewInt(9263230)) // check accumulated fees accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(278190), accumulatedFees) + require.Equal(t, big.NewInt(736770), accumulatedFees) developerFees := testContext.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(27775), developerFees) + require.Equal(t, big.NewInt(73633), developerFees) utils.CleanAccumulatedIntermediateTransactions(t, testContext) @@ -76,13 +77,13 @@ func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( _, err = testContext.Accounts.Commit() require.Nil(t, err) - vm.TestAccount(t, testContext.Accounts, rcvAddr, 1, big.NewInt(9721810)) + vm.TestAccount(t, testContext.Accounts, rcvAddr, 1, big.NewInt(9263230)) // check accumulated fees accumulatedFees = testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(556380), accumulatedFees) + require.Equal(t, big.NewInt(1473540), accumulatedFees) developerFees = testContext.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(55550), developerFees) + require.Equal(t, big.NewInt(147266), developerFees) ret := vm.GetVmOutput(nil, testContext.Accounts, scAddress, "resolve", userName) dnsUserNameAddr := ret.ReturnData[0] @@ -115,6 +116,10 @@ func 
TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( // relayer address is in shard 2, creates a transaction on the behalf of the user from shard 2, that will call the DNS contract // from shard 1. func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompatibility(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ ChangeUsernameEnableEpoch: 1000, // flag disabled, backwards compatibility SCProcessorV2EnableEpoch: 1000, @@ -124,8 +129,9 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompat testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( 1, enableEpochs, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, + 1, ) require.Nil(t, err) defer testContextForDNSContract.Close() @@ -133,8 +139,9 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompat testContextForRelayerAndUser, err := vm.CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( 2, enableEpochs, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, + 1, ) require.Nil(t, err) defer testContextForRelayerAndUser.Close() @@ -191,15 +198,17 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompat } func TestDeployDNSContract_TestGasWhenSaveUsernameAfterDNSv2IsActivated(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{ - DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + FixRelayedBaseCostEnableEpoch: integrationTests.UnreachableEpoch, + }, 1) require.Nil(t, err) defer testContextForDNSContract.Close() - testContextForRelayerAndUser, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{ - DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + testContextForRelayerAndUser, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextForRelayerAndUser.Close() scAddress, _ := utils.DoDeployDNS(t, testContextForDNSContract, "../../multiShard/smartContract/dns/dns.wasm") @@ -265,7 +274,7 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( // generate the user transaction userTxData := []byte("register@" + hex.EncodeToString(args.username)) - userTxGasLimit := uint64(200000) + userTxGasLimit := uint64(2000000) userTx := vm.CreateTransaction( getNonce(args.testContextForRelayerAndUser, args.userAddress), big.NewInt(0), diff --git a/integrationTests/vm/txsFee/dynamicGasCost_test.go b/integrationTests/vm/txsFee/dynamicGasCost_test.go index a8c8a8eb9eb..08edae2af13 100644 --- a/integrationTests/vm/txsFee/dynamicGasCost_test.go +++ b/integrationTests/vm/txsFee/dynamicGasCost_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -23,13 +19,17 @@ import ( ) func TestDynamicGasCostForDataTrieStorageLoad(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: 0, } shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, 1) gasScheduleNotifier := vm.CreateMockGasScheduleNotifier() - testContext, err := 
vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas(enableEpochs, shardCoordinator, integrationTests.CreateMemUnit(), gasScheduleNotifier) + testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas(enableEpochs, shardCoordinator, integrationTests.CreateMemUnit(), gasScheduleNotifier, 1) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/esdtLocalBurn_test.go b/integrationTests/vm/txsFee/esdtLocalBurn_test.go index c76957928a5..681c7e293b4 100644 --- a/integrationTests/vm/txsFee/esdtLocalBurn_test.go +++ b/integrationTests/vm/txsFee/esdtLocalBurn_test.go @@ -14,7 +14,11 @@ import ( ) func TestESDTLocalBurnShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -44,7 +48,11 @@ func TestESDTLocalBurnShouldWork(t *testing.T) { } func TestESDTLocalBurnMoreThanTotalBalanceShouldErr(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -74,7 +82,11 @@ func TestESDTLocalBurnMoreThanTotalBalanceShouldErr(t *testing.T) { } func TestESDTLocalBurnNotAllowedShouldErr(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/esdtLocalMint_test.go b/integrationTests/vm/txsFee/esdtLocalMint_test.go index 491d9102372..516402c80a4 100644 --- a/integrationTests/vm/txsFee/esdtLocalMint_test.go +++ b/integrationTests/vm/txsFee/esdtLocalMint_test.go @@ -14,7 +14,11 @@ import ( ) func TestESDTLocalMintShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -44,7 +48,11 @@ func TestESDTLocalMintShouldWork(t *testing.T) { } func TestESDTLocalMintNotAllowedShouldErr(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/esdtMetaDataRecreate_test.go b/integrationTests/vm/txsFee/esdtMetaDataRecreate_test.go new file mode 100644 index 00000000000..8e46c5d613b --- /dev/null +++ b/integrationTests/vm/txsFee/esdtMetaDataRecreate_test.go @@ -0,0 +1,99 @@ +package txsFee + +import ( + "bytes" + "encoding/hex" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests/vm" + 
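// The new test files below (metadata recreate, metadata update and modify-creator) follow the
// same pattern: assign a dynamic token type via ESDTSetTokenType sent from the ESDT system SC
// address, issue the token with ESDTNFTCreate, send the metadata built-in under test, then
// assert the metadata stored under the system account. The semantic difference being exercised
// (as generally documented for dynamic ESDTs, not asserted in full here) is that
// ESDTMetaDataRecreate replaces the whole metadata, ESDTMetaDataUpdate only the supplied
// fields, and ESDTModifyCreator - available only for dynamic token types and gated by the
// ESDTRoleModifyCreator role - rewrites just the creator address.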
"github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +func TestESDTMetaDataRecreate(t *testing.T) { + tokenTypes := getDynamicTokenTypes() + for _, tokenType := range tokenTypes { + testName := "metaDataRecreate for " + tokenType + t.Run(testName, func(t *testing.T) { + runEsdtMetaDataRecreateTest(t, tokenType) + }) + } +} + +func runEsdtMetaDataRecreateTest(t *testing.T, tokenType string) { + sndAddr := []byte("12345678901234567890123456789012") + token := []byte("tokenId") + roles := [][]byte{[]byte(core.ESDTRoleNFTRecreate), []byte(core.ESDTRoleNFTCreate)} + baseEsdtKeyPrefix := core.ProtectedKeyPrefix + core.ESDTKeyIdentifier + key := append([]byte(baseEsdtKeyPrefix), token...) + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) + require.Nil(t, err) + defer testContext.Close() + + createAccWithBalance(t, testContext.Accounts, sndAddr, big.NewInt(100000000)) + createAccWithBalance(t, testContext.Accounts, core.ESDTSCAddress, big.NewInt(100000000)) + utils.SetESDTRoles(t, testContext.Accounts, sndAddr, token, roles) + + tx := setTokenTypeTx(core.ESDTSCAddress, 100000, token, tokenType) + retCode, err := testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + defaultMetaData := GetDefaultMetaData() + tx = createTokenTx(sndAddr, sndAddr, 100000, 1, defaultMetaData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + // TODO change default metadata + defaultMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + tx = esdtMetaDataRecreateTx(sndAddr, sndAddr, 100000, defaultMetaData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) + + checkMetaData(t, testContext, core.SystemAccountAddress, key, defaultMetaData) +} + +func esdtMetaDataRecreateTx( + sndAddr []byte, + rcvAddr []byte, + gasLimit uint64, + metaData *MetaData, +) *transaction.Transaction { + txDataField := bytes.Join( + [][]byte{ + []byte(core.ESDTMetaDataRecreate), + metaData.TokenId, + metaData.Nonce, + metaData.Name, + metaData.Royalties, + metaData.Hash, + metaData.Attributes, + metaData.Uris[0], + metaData.Uris[1], + metaData.Uris[2], + }, + []byte("@"), + ) + + return &transaction.Transaction{ + Nonce: 1, + SndAddr: sndAddr, + RcvAddr: rcvAddr, + GasLimit: gasLimit, + GasPrice: gasPrice, + + Data: txDataField, + Value: big.NewInt(0), + } +} diff --git a/integrationTests/vm/txsFee/esdtMetaDataUpdate_test.go b/integrationTests/vm/txsFee/esdtMetaDataUpdate_test.go new file mode 100644 index 00000000000..53174e22a35 --- /dev/null +++ b/integrationTests/vm/txsFee/esdtMetaDataUpdate_test.go @@ -0,0 +1,100 @@ +package txsFee + +import ( + "bytes" + "encoding/hex" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests/vm" + "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + 
"github.com/stretchr/testify/require" +) + +func TestESDTMetaDataUpdate(t *testing.T) { + tokenTypes := getDynamicTokenTypes() + for _, tokenType := range tokenTypes { + testName := "metaDataUpdate for " + tokenType + t.Run(testName, func(t *testing.T) { + runEsdtMetaDataUpdateTest(t, tokenType) + }) + } +} + +func runEsdtMetaDataUpdateTest(t *testing.T, tokenType string) { + sndAddr := []byte("12345678901234567890123456789012") + token := []byte("tokenId") + roles := [][]byte{[]byte(core.ESDTRoleNFTUpdate), []byte(core.ESDTRoleNFTCreate)} + baseEsdtKeyPrefix := core.ProtectedKeyPrefix + core.ESDTKeyIdentifier + key := append([]byte(baseEsdtKeyPrefix), token...) + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) + require.Nil(t, err) + defer testContext.Close() + + createAccWithBalance(t, testContext.Accounts, sndAddr, big.NewInt(100000000)) + createAccWithBalance(t, testContext.Accounts, core.ESDTSCAddress, big.NewInt(100000000)) + utils.SetESDTRoles(t, testContext.Accounts, sndAddr, token, roles) + + tx := setTokenTypeTx(core.ESDTSCAddress, 100000, token, tokenType) + retCode, err := testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + defaultMetaData := GetDefaultMetaData() + tx = createTokenTx(sndAddr, sndAddr, 100000, 1, defaultMetaData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + // TODO change default metadata + defaultMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + defaultMetaData.Name = []byte(hex.EncodeToString([]byte("newName"))) + defaultMetaData.Hash = []byte(hex.EncodeToString([]byte("newHash"))) + defaultMetaData.Uris = [][]byte{defaultMetaData.Uris[1]} + tx = esdtMetaDataUpdateTx(sndAddr, sndAddr, 100000, defaultMetaData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) + + checkMetaData(t, testContext, core.SystemAccountAddress, key, defaultMetaData) +} + +func esdtMetaDataUpdateTx( + sndAddr []byte, + rcvAddr []byte, + gasLimit uint64, + metaData *MetaData, +) *transaction.Transaction { + txDataField := bytes.Join( + [][]byte{ + []byte(core.ESDTMetaDataUpdate), + metaData.TokenId, + metaData.Nonce, + metaData.Name, + metaData.Royalties, + metaData.Hash, + metaData.Attributes, + metaData.Uris[0], + }, + []byte("@"), + ) + + return &transaction.Transaction{ + Nonce: 1, + SndAddr: sndAddr, + RcvAddr: rcvAddr, + GasLimit: gasLimit, + GasPrice: gasPrice, + + Data: txDataField, + Value: big.NewInt(0), + } +} diff --git a/integrationTests/vm/txsFee/esdtModifyCreator_test.go b/integrationTests/vm/txsFee/esdtModifyCreator_test.go new file mode 100644 index 00000000000..ead51c5d61d --- /dev/null +++ b/integrationTests/vm/txsFee/esdtModifyCreator_test.go @@ -0,0 +1,97 @@ +package txsFee + +import ( + "bytes" + "encoding/hex" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests/vm" + "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + 
"github.com/stretchr/testify/require" +) + +func TestESDTModifyCreator(t *testing.T) { + tokenTypes := getDynamicTokenTypes() + for _, tokenType := range tokenTypes { + esdtType, _ := core.ConvertESDTTypeToUint32(tokenType) + if !core.IsDynamicESDT(esdtType) { + continue + } + testName := "esdtModifyCreator for " + tokenType + t.Run(testName, func(t *testing.T) { + runEsdtModifyCreatorTest(t, tokenType) + }) + } +} + +func runEsdtModifyCreatorTest(t *testing.T, tokenType string) { + newCreator := []byte("12345678901234567890123456789012") + creatorAddr := []byte("12345678901234567890123456789013") + token := []byte("tokenId") + baseEsdtKeyPrefix := core.ProtectedKeyPrefix + core.ESDTKeyIdentifier + key := append([]byte(baseEsdtKeyPrefix), token...) + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) + require.Nil(t, err) + defer testContext.Close() + + createAccWithBalance(t, testContext.Accounts, newCreator, big.NewInt(100000000)) + createAccWithBalance(t, testContext.Accounts, creatorAddr, big.NewInt(100000000)) + createAccWithBalance(t, testContext.Accounts, core.ESDTSCAddress, big.NewInt(100000000)) + utils.SetESDTRoles(t, testContext.Accounts, creatorAddr, token, [][]byte{[]byte(core.ESDTRoleNFTCreate)}) + utils.SetESDTRoles(t, testContext.Accounts, newCreator, token, [][]byte{[]byte(core.ESDTRoleModifyCreator)}) + + tx := setTokenTypeTx(core.ESDTSCAddress, 100000, token, tokenType) + retCode, err := testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + defaultMetaData := GetDefaultMetaData() + defaultMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + tx = createTokenTx(creatorAddr, creatorAddr, 100000, 1, defaultMetaData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + tx = esdtModifyCreatorTx(newCreator, newCreator, 100000, defaultMetaData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) + + retrievedMetaData := getMetaDataFromAcc(t, testContext, core.SystemAccountAddress, key) + require.Equal(t, newCreator, retrievedMetaData.Creator) +} + +func esdtModifyCreatorTx( + sndAddr []byte, + rcvAddr []byte, + gasLimit uint64, + metaData *MetaData, +) *transaction.Transaction { + txDataField := bytes.Join( + [][]byte{ + []byte(core.ESDTModifyCreator), + metaData.TokenId, + metaData.Nonce, + }, + []byte("@"), + ) + return &transaction.Transaction{ + Nonce: 0, + SndAddr: sndAddr, + RcvAddr: rcvAddr, + GasLimit: gasLimit, + GasPrice: gasPrice, + + Data: txDataField, + Value: big.NewInt(0), + } +} diff --git a/integrationTests/vm/txsFee/esdtModifyRoyalties_test.go b/integrationTests/vm/txsFee/esdtModifyRoyalties_test.go new file mode 100644 index 00000000000..f4ef7dc9f49 --- /dev/null +++ b/integrationTests/vm/txsFee/esdtModifyRoyalties_test.go @@ -0,0 +1,92 @@ +package txsFee + +import ( + "bytes" + "encoding/hex" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests/vm" + "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" + vmcommon 
"github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +func TestESDTModifyRoyalties(t *testing.T) { + tokenTypes := getDynamicTokenTypes() + for _, tokenType := range tokenTypes { + testName := "esdtModifyRoyalties for " + tokenType + t.Run(testName, func(t *testing.T) { + runEsdtModifyRoyaltiesTest(t, tokenType) + }) + } +} + +func runEsdtModifyRoyaltiesTest(t *testing.T, tokenType string) { + creatorAddr := []byte("12345678901234567890123456789013") + token := []byte("tokenId") + baseEsdtKeyPrefix := core.ProtectedKeyPrefix + core.ESDTKeyIdentifier + key := append([]byte(baseEsdtKeyPrefix), token...) + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) + require.Nil(t, err) + defer testContext.Close() + + createAccWithBalance(t, testContext.Accounts, creatorAddr, big.NewInt(100000000)) + createAccWithBalance(t, testContext.Accounts, core.ESDTSCAddress, big.NewInt(100000000)) + utils.SetESDTRoles(t, testContext.Accounts, creatorAddr, token, [][]byte{[]byte(core.ESDTRoleModifyRoyalties), []byte(core.ESDTRoleNFTCreate)}) + + tx := setTokenTypeTx(core.ESDTSCAddress, 100000, token, tokenType) + retCode, err := testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + defaultMetaData := GetDefaultMetaData() + tx = createTokenTx(creatorAddr, creatorAddr, 100000, 1, defaultMetaData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + defaultMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + defaultMetaData.Royalties = []byte(hex.EncodeToString(big.NewInt(20).Bytes())) + tx = esdtModifyRoyaltiesTx(creatorAddr, creatorAddr, 100000, defaultMetaData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) + + retrievedMetaData := getMetaDataFromAcc(t, testContext, core.SystemAccountAddress, key) + require.Equal(t, uint32(big.NewInt(20).Uint64()), retrievedMetaData.Royalties) +} + +func esdtModifyRoyaltiesTx( + sndAddr []byte, + rcvAddr []byte, + gasLimit uint64, + metaData *MetaData, +) *transaction.Transaction { + txDataField := bytes.Join( + [][]byte{ + []byte(core.ESDTModifyRoyalties), + metaData.TokenId, + metaData.Nonce, + metaData.Royalties, + }, + []byte("@"), + ) + return &transaction.Transaction{ + Nonce: 1, + SndAddr: sndAddr, + RcvAddr: rcvAddr, + GasLimit: gasLimit, + GasPrice: gasPrice, + + Data: txDataField, + Value: big.NewInt(0), + } +} diff --git a/integrationTests/vm/txsFee/esdtSetNewURIs_test.go b/integrationTests/vm/txsFee/esdtSetNewURIs_test.go new file mode 100644 index 00000000000..66ec209c3ef --- /dev/null +++ b/integrationTests/vm/txsFee/esdtSetNewURIs_test.go @@ -0,0 +1,95 @@ +package txsFee + +import ( + "bytes" + "encoding/hex" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests/vm" + "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +func TestESDTSetNewURIs(t *testing.T) 
{ + tokenTypes := getDynamicTokenTypes() + for _, tokenType := range tokenTypes { + testName := "ESDTsetNewURIs for " + tokenType + t.Run(testName, func(t *testing.T) { + runEsdtSetNewURIsTest(t, tokenType) + }) + } +} + +func runEsdtSetNewURIsTest(t *testing.T, tokenType string) { + sndAddr := []byte("12345678901234567890123456789012") + token := []byte("tokenId") + roles := [][]byte{[]byte(core.ESDTRoleSetNewURI), []byte(core.ESDTRoleNFTCreate)} + baseEsdtKeyPrefix := core.ProtectedKeyPrefix + core.ESDTKeyIdentifier + key := append([]byte(baseEsdtKeyPrefix), token...) + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) + require.Nil(t, err) + defer testContext.Close() + + createAccWithBalance(t, testContext.Accounts, sndAddr, big.NewInt(100000000)) + createAccWithBalance(t, testContext.Accounts, core.ESDTSCAddress, big.NewInt(100000000)) + utils.SetESDTRoles(t, testContext.Accounts, sndAddr, token, roles) + + tx := setTokenTypeTx(core.ESDTSCAddress, 100000, token, tokenType) + retCode, err := testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + defaultMetaData := GetDefaultMetaData() + tx = createTokenTx(sndAddr, sndAddr, 100000, 1, defaultMetaData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + defaultMetaData.Uris = [][]byte{[]byte(hex.EncodeToString([]byte("newUri1"))), []byte(hex.EncodeToString([]byte("newUri2")))} + defaultMetaData.Nonce = []byte(hex.EncodeToString(big.NewInt(1).Bytes())) + tx = esdtSetNewUrisTx(sndAddr, sndAddr, 100000, defaultMetaData) + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) + + retrievedMetaData := getMetaDataFromAcc(t, testContext, core.SystemAccountAddress, key) + require.Equal(t, [][]byte{[]byte("newUri1"), []byte("newUri2")}, retrievedMetaData.URIs) +} + +func esdtSetNewUrisTx( + sndAddr []byte, + rcvAddr []byte, + gasLimit uint64, + metaData *MetaData, +) *transaction.Transaction { + txDataField := bytes.Join( + [][]byte{ + []byte(core.ESDTSetNewURIs), + metaData.TokenId, + metaData.Nonce, + metaData.Uris[0], + metaData.Uris[1], + }, + []byte("@"), + ) + + return &transaction.Transaction{ + Nonce: 1, + SndAddr: sndAddr, + RcvAddr: rcvAddr, + GasLimit: gasLimit, + GasPrice: gasPrice, + + Data: txDataField, + Value: big.NewInt(0), + } +} diff --git a/integrationTests/vm/txsFee/esdt_test.go b/integrationTests/vm/txsFee/esdt_test.go index da865619d4e..d51848762e8 100644 --- a/integrationTests/vm/txsFee/esdt_test.go +++ b/integrationTests/vm/txsFee/esdt_test.go @@ -18,7 +18,11 @@ import ( ) func TestESDTTransferShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -54,7 +58,11 @@ func TestESDTTransferShouldWork(t *testing.T) { } func TestESDTTransferShouldWorkToMuchGasShouldConsumeAllGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -90,7 +98,11 
@@ func TestESDTTransferShouldWorkToMuchGasShouldConsumeAllGas(t *testing.T) { } func TestESDTTransferInvalidESDTValueShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -126,8 +138,12 @@ func TestESDTTransferInvalidESDTValueShouldConsumeGas(t *testing.T) { } func TestESDTTransferCallBackOnErrorShouldNotGenerateSCRsFurther(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + shardC, _ := sharding.NewMultiShardCoordinator(2, 0) - testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinator(config.EnableEpochs{}, shardC) + testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinator(config.EnableEpochs{}, shardC, 1) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/guardAccount_test.go b/integrationTests/vm/txsFee/guardAccount_test.go index 2baa497f991..52a64322bb1 100644 --- a/integrationTests/vm/txsFee/guardAccount_test.go +++ b/integrationTests/vm/txsFee/guardAccount_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -99,14 +95,15 @@ func prepareTestContextForGuardedAccounts(tb testing.TB) *vm.VMTestContext { testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGasAndRoundConfig( config.EnableEpochs{ GovernanceEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, + FixRelayedBaseCostEnableEpoch: unreachableEpoch, }, testscommon.NewMultiShardsCoordinatorMock(2), db, gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), + 1, ) require.Nil(tb, err) @@ -351,6 +348,10 @@ func setNewEpochOnContext(testContext *vm.VMTestContext, epoch uint32) { } func TestGuardAccount_ShouldErrorIfInstantSetIsDoneOnANotProtectedAccount(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -368,6 +369,10 @@ func TestGuardAccount_ShouldErrorIfInstantSetIsDoneOnANotProtectedAccount(t *tes } func TestGuardAccount_ShouldSetGuardianOnANotProtectedAccount(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -468,6 +473,10 @@ func TestGuardAccount_ShouldSetGuardianOnANotProtectedAccount(t *testing.T) { } func TestGuardAccount_SendingFundsWhileProtectedAndNotProtected(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -593,6 +602,10 @@ func TestGuardAccount_SendingFundsWhileProtectedAndNotProtected(t *testing.T) { // 14. 
alice un-guards the accounts immediately using a cosigned transaction and then sends a guarded transaction -> should error // 14.1 alice sends unguarded transaction -> should work func TestGuardAccount_Scenario1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -917,6 +930,10 @@ func TestGuardAccount_Scenario1(t *testing.T) { // 3.1 cosigned transaction should work // 3.2 single signed transaction should not work func TestGuardAccounts_RelayedTransactionV1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -962,7 +979,7 @@ func TestGuardAccounts_RelayedTransactionV1(t *testing.T) { alice, david, gasPrice, - transferGas+guardianSigVerificationGas, + 1+guardianSigVerificationGas, make([]byte, 0)) userTx.GuardianAddr = bob @@ -970,7 +987,7 @@ func TestGuardAccounts_RelayedTransactionV1(t *testing.T) { userTx.Version = txWithOptionVersion rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + transferGas + guardianSigVerificationGas + uint64(len(rtxData)) + rTxGasLimit := minGasLimit + guardianSigVerificationGas + minGasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(getNonce(testContext, charlie), big.NewInt(0), charlie, alice, gasPrice, rTxGasLimit, rtxData) returnCode, err = testContext.TxProcessor.ProcessTransaction(rtx) require.Nil(t, err) @@ -1001,13 +1018,13 @@ func TestGuardAccounts_RelayedTransactionV1(t *testing.T) { alice, david, gasPrice, - transferGas+guardianSigVerificationGas, + minGasLimit, make([]byte, 0)) userTx.Version = txWithOptionVersion rtxData = integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit = 1 + transferGas + guardianSigVerificationGas + uint64(len(rtxData)) + rTxGasLimit = minGasLimit + minGasLimit + uint64(len(rtxData)) rtx = vm.CreateTransaction(getNonce(testContext, charlie), big.NewInt(0), charlie, alice, gasPrice, rTxGasLimit, rtxData) returnCode, err = testContext.TxProcessor.ProcessTransaction(rtx) require.Nil(t, err) @@ -1037,6 +1054,10 @@ func TestGuardAccounts_RelayedTransactionV1(t *testing.T) { // 3.1 cosigned transaction should not work // 3.2 single signed transaction should not work func TestGuardAccounts_RelayedTransactionV2(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext := prepareTestContextForGuardedAccounts(t) defer testContext.Close() @@ -1076,14 +1097,14 @@ func TestGuardAccounts_RelayedTransactionV2(t *testing.T) { testContext.CleanIntermediateTransactions(t) // step 3 - charlie sends a relayed transaction v1 on the behalf of alice - // 3.1 cosigned transaction should work + // 3.1 cosigned transaction should not work userTx := vm.CreateTransaction( getNonce(testContext, alice), transferValue, alice, david, gasPrice, - transferGas+guardianSigVerificationGas, + 1+guardianSigVerificationGas, make([]byte, 0)) userTx.GuardianAddr = bob @@ -1091,7 +1112,7 @@ func TestGuardAccounts_RelayedTransactionV2(t *testing.T) { userTx.Version = txWithOptionVersion rtxData := integrationTests.PrepareRelayedTxDataV2(userTx) - rTxGasLimit := 1 + transferGas + guardianSigVerificationGas + uint64(len(rtxData)) + rTxGasLimit := minGasLimit + guardianSigVerificationGas + minGasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(getNonce(testContext, charlie), big.NewInt(0), charlie, alice, gasPrice, rTxGasLimit, rtxData) returnCode, err = 
testContext.TxProcessor.ProcessTransaction(rtx) require.Nil(t, err) @@ -1110,7 +1131,8 @@ func TestGuardAccounts_RelayedTransactionV2(t *testing.T) { assert.Equal(t, aliceCurrentBalance, getBalance(testContext, alice)) bobExpectedBalance := big.NewInt(0).Set(initialMint) assert.Equal(t, bobExpectedBalance, getBalance(testContext, bob)) - charlieExpectedBalance := big.NewInt(0).Sub(initialMint, big.NewInt(int64(rTxGasLimit*gasPrice))) + charlieConsumed := minGasLimit + guardianSigVerificationGas + minGasLimit + uint64(len(rtxData)) + charlieExpectedBalance := big.NewInt(0).Sub(initialMint, big.NewInt(int64(charlieConsumed*gasPrice))) assert.Equal(t, charlieExpectedBalance, getBalance(testContext, charlie)) assert.Equal(t, initialMint, getBalance(testContext, david)) @@ -1124,13 +1146,13 @@ func TestGuardAccounts_RelayedTransactionV2(t *testing.T) { alice, david, gasPrice, - transferGas+guardianSigVerificationGas, + minGasLimit, make([]byte, 0)) userTx.Version = txWithOptionVersion rtxData = integrationTests.PrepareRelayedTxDataV2(userTx) - rTxGasLimit = 1 + transferGas + guardianSigVerificationGas + uint64(len(rtxData)) + rTxGasLimit = minGasLimit + minGasLimit + uint64(len(rtxData)) rtx = vm.CreateTransaction(getNonce(testContext, charlie), big.NewInt(0), charlie, alice, gasPrice, rTxGasLimit, rtxData) returnCode, err = testContext.TxProcessor.ProcessTransaction(rtx) require.Nil(t, err) diff --git a/integrationTests/vm/txsFee/migrateDataTrie_test.go b/integrationTests/vm/txsFee/migrateDataTrie_test.go index a4bc4ad1e0f..d089be8fc14 100644 --- a/integrationTests/vm/txsFee/migrateDataTrie_test.go +++ b/integrationTests/vm/txsFee/migrateDataTrie_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -31,7 +27,9 @@ type dataTrie interface { } func TestMigrateDataTrieBuiltInFunc(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } enableEpochs := config.EnableEpochs{ AutoBalanceDataTriesEnableEpoch: 0, @@ -47,7 +45,7 @@ func TestMigrateDataTrieBuiltInFunc(t *testing.T) { t.Run("deterministic trie", func(t *testing.T) { t.Parallel() - testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas(enableEpochs, shardCoordinator, integrationTests.CreateMemUnit(), gasScheduleNotifier) + testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas(enableEpochs, shardCoordinator, integrationTests.CreateMemUnit(), gasScheduleNotifier, 1) require.Nil(t, err) defer testContext.Close() @@ -125,7 +123,7 @@ func TestMigrateDataTrieBuiltInFunc(t *testing.T) { t.Run("random trie - all leaves are migrated in multiple transactions", func(t *testing.T) { t.Parallel() - testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas(enableEpochs, shardCoordinator, integrationTests.CreateMemUnit(), gasScheduleNotifier) + testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas(enableEpochs, shardCoordinator, integrationTests.CreateMemUnit(), gasScheduleNotifier, 1) require.Nil(t, err) defer testContext.Close() @@ -215,7 +213,8 @@ func generateDataTrie( for i := 1; i < numLeaves; i++ { key := keyGenerator(i) - err := tr.UpdateWithVersion(key, key, core.NotSpecified) + value := getValWithAppendedData(key, key, accAddr) + err := tr.UpdateWithVersion(key, value, core.NotSpecified) require.Nil(t, err) keys[i] = key @@ -226,6 +225,13 @@ func generateDataTrie( return rootHash, keys } +func 
getValWithAppendedData(key, val, address []byte) []byte { + suffix := append(key, address...) + val = append(val, suffix...) + + return val +} + func initDataTrie( t *testing.T, testContext *vm.VMTestContext, diff --git a/integrationTests/vm/txsFee/moveBalance_test.go b/integrationTests/vm/txsFee/moveBalance_test.go index 78646813825..8e847dba20b 100644 --- a/integrationTests/vm/txsFee/moveBalance_test.go +++ b/integrationTests/vm/txsFee/moveBalance_test.go @@ -16,11 +16,13 @@ import ( "github.com/stretchr/testify/require" ) -const gasPrice = uint64(10) - // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceSelfShouldWorkAndConsumeTxFee(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -55,7 +57,11 @@ func TestMoveBalanceSelfShouldWorkAndConsumeTxFee(t *testing.T) { // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceAllFlagsEnabledLessBalanceThanGasLimitMulGasPrice(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -72,7 +78,11 @@ func TestMoveBalanceAllFlagsEnabledLessBalanceThanGasLimitMulGasPrice(t *testing } func TestMoveBalanceShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -112,7 +122,11 @@ func TestMoveBalanceShouldWork(t *testing.T) { } func TestMoveBalanceInvalidHasGasButNoValueShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -141,7 +155,11 @@ func TestMoveBalanceInvalidHasGasButNoValueShouldConsumeGas(t *testing.T) { } func TestMoveBalanceHigherNonceShouldNotConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -171,7 +189,11 @@ func TestMoveBalanceHigherNonceShouldNotConsumeGas(t *testing.T) { } func TestMoveBalanceMoreGasThanGasLimitPerMiniBlockForSafeCrossShard(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -202,7 +224,11 @@ func TestMoveBalanceMoreGasThanGasLimitPerMiniBlockForSafeCrossShard(t *testing. 
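The guarded-account hunks above replace the literal gas sums (1 + transferGas + guardianSigVerificationGas + len(rtxData)) with sums built from minGasLimit. A minimal sketch of the assumed composition follows; the helper name relayedV1GasLimit and the guardian gas value are illustrative only, not part of the repository. The outer (relayed) transaction appears to budget its own move-balance gas, the inner transaction's gas limit, and one gas unit per byte of the marshalled inner transaction carried in the data field (the tests run with gasPerDataByte = 1).

package main

import "fmt"

// relayedV1GasLimit is an illustrative helper (not in the repository): it mirrors the
// gas-limit expression used in the guarded-account tests above.
func relayedV1GasLimit(minGasLimit, innerGasLimit uint64, rtxData []byte) uint64 {
	return minGasLimit + innerGasLimit + uint64(len(rtxData))
}

func main() {
	const minGasLimit = uint64(1)
	const guardianSigVerificationGas = uint64(50000) // assumed value, for illustration only
	rtxData := make([]byte, 300)                     // stand-in for PrepareRelayedTxDataV1(userTx)

	// Matches rTxGasLimit := minGasLimit + guardianSigVerificationGas + minGasLimit + uint64(len(rtxData))
	innerGasLimit := guardianSigVerificationGas + minGasLimit
	fmt.Println(relayedV1GasLimit(minGasLimit, innerGasLimit, rtxData))
}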
} func TestMoveBalanceInvalidUserNames(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/multiESDTTransfer_test.go b/integrationTests/vm/txsFee/multiESDTTransfer_test.go index d9457da31c5..adaf89ad340 100644 --- a/integrationTests/vm/txsFee/multiESDTTransfer_test.go +++ b/integrationTests/vm/txsFee/multiESDTTransfer_test.go @@ -15,7 +15,11 @@ import ( ) func TestMultiESDTTransferShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -69,10 +73,14 @@ func TestMultiESDTTransferShouldWork(t *testing.T) { } func TestMultiESDTTransferFailsBecauseOfMaxLimit(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMsAndCustomGasSchedule(config.EnableEpochs{}, func(gasMap wasmConfig.GasScheduleMap) { gasMap[common.MaxPerTransaction]["MaxNumberOfTransfersPerTx"] = 1 - }) + }, 1) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go b/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go index aac3723f294..573370bab26 100644 --- a/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go +++ b/integrationTests/vm/txsFee/multiShard/asyncCallWithChangeOwner_test.go @@ -17,7 +17,7 @@ import ( func TestDoChangeOwnerCrossShardFromAContract(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } enableEpochs := config.EnableEpochs{ @@ -25,7 +25,7 @@ func TestDoChangeOwnerCrossShardFromAContract(t *testing.T) { ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 0, } - testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs) + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs, 1) require.Nil(t, err) defer testContextSource.Close() @@ -42,7 +42,7 @@ func TestDoChangeOwnerCrossShardFromAContract(t *testing.T) { require.Equal(t, uint32(0), testContextSource.ShardCoordinator.ComputeId(firstContract)) require.Equal(t, uint32(0), testContextSource.ShardCoordinator.ComputeId(firstOwner)) - testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs) + testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs, 1) require.Nil(t, err) defer testContextSecondContract.Close() diff --git a/integrationTests/vm/txsFee/multiShard/asyncCall_test.go b/integrationTests/vm/txsFee/multiShard/asyncCall_test.go index 181d937e55e..c02aed11578 100644 --- a/integrationTests/vm/txsFee/multiShard/asyncCall_test.go +++ b/integrationTests/vm/txsFee/multiShard/asyncCall_test.go @@ -1,5 +1,3 @@ -//go:build !race - package multiShard import ( @@ -11,29 +9,29 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" + 
"github.com/multiversx/mx-chain-go/testscommon" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" ) func TestAsyncCallShouldWork(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, } - testContextFirstContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs) + testContextFirstContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs, 1) require.Nil(t, err) defer testContextFirstContract.Close() - testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs) + testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs, 1) require.Nil(t, err) defer testContextSecondContract.Close() - testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, enableEpochs) + testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, enableEpochs, 1) require.Nil(t, err) defer testContextSender.Close() @@ -99,8 +97,8 @@ func TestAsyncCallShouldWork(t *testing.T) { res := vm.GetIntValueFromSC(nil, testContextFirstContract.Accounts, firstScAddress, "numCalled") require.Equal(t, big.NewInt(1), res) - require.Equal(t, big.NewInt(5540), testContextFirstContract.TxFeeHandler.GetAccumulatedFees()) - require.Equal(t, big.NewInt(554), testContextFirstContract.TxFeeHandler.GetDeveloperFees()) + require.Equal(t, big.NewInt(158400), testContextFirstContract.TxFeeHandler.GetAccumulatedFees()) + require.Equal(t, big.NewInt(15840), testContextFirstContract.TxFeeHandler.GetDeveloperFees()) intermediateTxs = testContextFirstContract.GetIntermediateTransactions(t) require.NotNil(t, intermediateTxs) @@ -109,8 +107,8 @@ func TestAsyncCallShouldWork(t *testing.T) { scr = intermediateTxs[0] utils.ProcessSCRResult(t, testContextSecondContract, scr, vmcommon.Ok, nil) - require.Equal(t, big.NewInt(49990510), testContextSecondContract.TxFeeHandler.GetAccumulatedFees()) - require.Equal(t, big.NewInt(4999051), testContextSecondContract.TxFeeHandler.GetDeveloperFees()) + require.Equal(t, big.NewInt(49837650), testContextSecondContract.TxFeeHandler.GetAccumulatedFees()) + require.Equal(t, big.NewInt(4983765), testContextSecondContract.TxFeeHandler.GetDeveloperFees()) intermediateTxs = testContextSecondContract.GetIntermediateTransactions(t) require.NotNil(t, intermediateTxs) @@ -118,7 +116,7 @@ func TestAsyncCallShouldWork(t *testing.T) { func TestAsyncCallDisabled(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Arwen fix") + t.Skip("this is not a short test") } enableEpochs := config.EnableEpochs{ @@ -128,20 +126,20 @@ func TestAsyncCallDisabled(t *testing.T) { SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, } - roundsConfig := integrationTests.GetDefaultRoundsConfig() + roundsConfig := testscommon.GetDefaultRoundsConfig() activationRound := roundsConfig.RoundActivations["DisableAsyncCallV1"] activationRound.Round = "0" roundsConfig.RoundActivations["DisableAsyncCallV1"] = activationRound - testContextFirstContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(0, enableEpochs, roundsConfig) + testContextFirstContract, err := 
vm.CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(0, enableEpochs, roundsConfig, 1) require.Nil(t, err) defer testContextFirstContract.Close() - testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(1, enableEpochs, roundsConfig) + testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(1, enableEpochs, roundsConfig, 1) require.Nil(t, err) defer testContextSecondContract.Close() - testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(2, enableEpochs, roundsConfig) + testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(2, enableEpochs, roundsConfig, 1) require.Nil(t, err) defer testContextSender.Close() diff --git a/integrationTests/vm/txsFee/multiShard/asyncESDT_test.go b/integrationTests/vm/txsFee/multiShard/asyncESDT_test.go index 114859ac5bf..aa37bc6bf94 100644 --- a/integrationTests/vm/txsFee/multiShard/asyncESDT_test.go +++ b/integrationTests/vm/txsFee/multiShard/asyncESDT_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package multiShard import ( @@ -18,19 +14,23 @@ import ( ) func TestAsyncESDTTransferWithSCCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, } - testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs) + testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs, 1) require.Nil(t, err) defer testContextSender.Close() - testContextFirstContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs) + testContextFirstContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs, 1) require.Nil(t, err) defer testContextFirstContract.Close() - testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, enableEpochs) + testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, enableEpochs, 1) require.Nil(t, err) defer testContextSecondContract.Close() @@ -98,10 +98,10 @@ func TestAsyncESDTTransferWithSCCallShouldWork(t *testing.T) { _, err = testContextSender.Accounts.Commit() require.Nil(t, err) - expectedAccumulatedFees = big.NewInt(189890) + expectedAccumulatedFees = big.NewInt(1146530) require.Equal(t, expectedAccumulatedFees, testContextFirstContract.TxFeeHandler.GetAccumulatedFees()) - developerFees := big.NewInt(18989) + developerFees := big.NewInt(114653) require.Equal(t, developerFees, testContextFirstContract.TxFeeHandler.GetDeveloperFees()) utils.CheckESDTBalance(t, testContextFirstContract, firstSCAddress, token, big.NewInt(2500)) @@ -115,10 +115,10 @@ func TestAsyncESDTTransferWithSCCallShouldWork(t *testing.T) { utils.CheckESDTBalance(t, testContextSecondContract, secondSCAddress, token, big.NewInt(2500)) - accumulatedFee := big.NewInt(62340) + accumulatedFee := big.NewInt(540660) require.Equal(t, accumulatedFee, testContextSecondContract.TxFeeHandler.GetAccumulatedFees()) - developerFees = big.NewInt(6234) + developerFees = big.NewInt(54066) require.Equal(t, developerFees, testContextSecondContract.TxFeeHandler.GetDeveloperFees()) intermediateTxs = testContextSecondContract.GetIntermediateTransactions(t) @@ -126,23 +126,27 @@ func TestAsyncESDTTransferWithSCCallShouldWork(t *testing.T) { utils.ProcessSCRResult(t, 
testContextFirstContract, intermediateTxs[1], vmcommon.Ok, nil) - require.Equal(t, big.NewInt(4936720), testContextFirstContract.TxFeeHandler.GetAccumulatedFees()) + require.Equal(t, big.NewInt(4458400), testContextFirstContract.TxFeeHandler.GetAccumulatedFees()) } func TestAsyncESDTTransferWithSCCallSecondContractAnotherToken(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, } - testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs) + testContextSender, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs, 1) require.Nil(t, err) defer testContextSender.Close() - testContextFirstContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs) + testContextFirstContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs, 1) require.Nil(t, err) defer testContextFirstContract.Close() - testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, enableEpochs) + testContextSecondContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, enableEpochs, 1) require.Nil(t, err) defer testContextSecondContract.Close() @@ -207,10 +211,10 @@ func TestAsyncESDTTransferWithSCCallSecondContractAnotherToken(t *testing.T) { _, err = testContextSender.Accounts.Commit() require.Nil(t, err) - expectedAccumulatedFees = big.NewInt(189890) + expectedAccumulatedFees = big.NewInt(1146530) require.Equal(t, expectedAccumulatedFees, testContextFirstContract.TxFeeHandler.GetAccumulatedFees()) - developerFees := big.NewInt(18989) + developerFees := big.NewInt(114653) require.Equal(t, developerFees, testContextFirstContract.TxFeeHandler.GetDeveloperFees()) utils.CheckESDTBalance(t, testContextFirstContract, firstSCAddress, token, big.NewInt(2500)) @@ -223,7 +227,7 @@ func TestAsyncESDTTransferWithSCCallSecondContractAnotherToken(t *testing.T) { utils.CheckESDTBalance(t, testContextSecondContract, secondSCAddress, token, big.NewInt(0)) - accumulatedFee := big.NewInt(3720770) + accumulatedFee := big.NewInt(2764130) require.Equal(t, accumulatedFee, testContextSecondContract.TxFeeHandler.GetAccumulatedFees()) developerFees = big.NewInt(0) @@ -234,5 +238,5 @@ func TestAsyncESDTTransferWithSCCallSecondContractAnotherToken(t *testing.T) { utils.ProcessSCRResult(t, testContextFirstContract, intermediateTxs[0], vmcommon.Ok, nil) - require.Equal(t, big.NewInt(1278290), testContextFirstContract.TxFeeHandler.GetAccumulatedFees()) + require.Equal(t, big.NewInt(2234930), testContextFirstContract.TxFeeHandler.GetAccumulatedFees()) } diff --git a/integrationTests/vm/txsFee/multiShard/builtInFunctions_test.go b/integrationTests/vm/txsFee/multiShard/builtInFunctions_test.go index ea14882730b..b8aff559fbc 100644 --- a/integrationTests/vm/txsFee/multiShard/builtInFunctions_test.go +++ b/integrationTests/vm/txsFee/multiShard/builtInFunctions_test.go @@ -33,7 +33,7 @@ func getZeroGasAndFees() scheduled.GasAndFees { // 4. 
Execute SCR from context destination on context source ( the new owner will receive the developer rewards) func TestBuiltInFunctionExecuteOnSourceAndDestinationShouldWork(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( @@ -41,7 +41,8 @@ func TestBuiltInFunctionExecuteOnSourceAndDestinationShouldWork(t *testing.T) { config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, + 1) require.Nil(t, err) defer testContextSource.Close() @@ -50,7 +51,8 @@ func TestBuiltInFunctionExecuteOnSourceAndDestinationShouldWork(t *testing.T) { config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, + 1) require.Nil(t, err) defer testContextDst.Close() @@ -66,7 +68,7 @@ func TestBuiltInFunctionExecuteOnSourceAndDestinationShouldWork(t *testing.T) { require.Equal(t, uint32(0), testContextDst.ShardCoordinator.ComputeId(newOwner)) gasPrice := uint64(10) - gasLimit := uint64(1000) + gasLimit := uint64(100000) txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) tx := vm.CreateTransaction(1, big.NewInt(0), owner, scAddr, gasPrice, gasLimit, txData) @@ -80,7 +82,7 @@ func TestBuiltInFunctionExecuteOnSourceAndDestinationShouldWork(t *testing.T) { utils.CheckOwnerAddr(t, testContextDst, scAddr, newOwner) accumulatedFees := testContextDst.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(10000), accumulatedFees) + require.Equal(t, big.NewInt(1000000), accumulatedFees) utils.CleanAccumulatedIntermediateTransactions(t, testContextDst) @@ -93,8 +95,8 @@ func TestBuiltInFunctionExecuteOnSourceAndDestinationShouldWork(t *testing.T) { scUserAcc := scStateAcc.(state.UserAccountHandler) currentSCDevBalance := scUserAcc.GetDeveloperReward() - gasLimit = uint64(500) - _, _ = vm.CreateAccount(testContextDst.Accounts, sndAddr, 0, big.NewInt(10000)) + gasLimit = uint64(50000) + _, _ = vm.CreateAccount(testContextDst.Accounts, sndAddr, 0, big.NewInt(100000000)) tx = vm.CreateTransaction(0, big.NewInt(0), sndAddr, scAddr, gasPrice, gasLimit, []byte("increment")) retCode, err := testContextDst.TxProcessor.ProcessTransaction(tx) @@ -104,18 +106,18 @@ func TestBuiltInFunctionExecuteOnSourceAndDestinationShouldWork(t *testing.T) { _, err = testContextDst.Accounts.Commit() require.Nil(t, err) - expectedBalance := big.NewInt(6130) + expectedBalance := big.NewInt(99843270) vm.TestAccount(t, testContextDst.Accounts, sndAddr, 1, expectedBalance) accumulatedFees = testContextDst.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(13870), accumulatedFees) + require.Equal(t, big.NewInt(1156730), accumulatedFees) developerFees := testContextDst.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(1292), developerFees) + require.Equal(t, big.NewInt(115578), developerFees) // call get developer rewards - gasLimit = 500 - _, _ = vm.CreateAccount(testContextSource.Accounts, newOwner, 0, big.NewInt(10000)) + gasLimit = 500000 + _, _ = vm.CreateAccount(testContextSource.Accounts, newOwner, 0, big.NewInt(10000000)) txData = []byte(core.BuiltInFunctionClaimDeveloperRewards) tx = vm.CreateTransaction(0, big.NewInt(0), newOwner, scAddr, gasPrice, gasLimit, txData) @@ -124,14 
+126,14 @@ func TestBuiltInFunctionExecuteOnSourceAndDestinationShouldWork(t *testing.T) { require.Equal(t, vmcommon.Ok, retCode) require.Nil(t, err) - expectedBalance = big.NewInt(5000) + expectedBalance = big.NewInt(5000000) utils.TestAccount(t, testContextSource.Accounts, newOwner, 1, expectedBalance) accumulatedFees = testContextSource.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(5000), accumulatedFees) + require.Equal(t, big.NewInt(5000000), accumulatedFees) developerFees = testContextSource.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(477), developerFees) + require.Equal(t, big.NewInt(499977), developerFees) utils.CleanAccumulatedIntermediateTransactions(t, testContextDst) @@ -145,7 +147,7 @@ func TestBuiltInFunctionExecuteOnSourceAndDestinationShouldWork(t *testing.T) { utils.ProcessSCRResult(t, testContextSource, scr, vmcommon.Ok, nil) - expectedBalance = big.NewInt(5001 + 376 + currentSCDevBalance.Int64()) + expectedBalance = big.NewInt(499977 + 4515686 + currentSCDevBalance.Int64()) utils.TestAccount(t, testContextSource.Accounts, newOwner, 1, expectedBalance) } diff --git a/integrationTests/vm/txsFee/multiShard/esdtLiquidity_test.go b/integrationTests/vm/txsFee/multiShard/esdtLiquidity_test.go index a18a62003e3..6d2fe8cfa00 100644 --- a/integrationTests/vm/txsFee/multiShard/esdtLiquidity_test.go +++ b/integrationTests/vm/txsFee/multiShard/esdtLiquidity_test.go @@ -18,14 +18,18 @@ import ( ) func TestSystemAccountLiquidityAfterCrossShardTransferAndBurn(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tokenID := []byte("MYNFT") sh0Addr := []byte("12345678901234567890123456789010") sh1Addr := []byte("12345678901234567890123456789011") - sh0Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + sh0Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer sh0Context.Close() - sh1Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + sh1Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer sh1Context.Close() _, _ = vm.CreateAccount(sh1Context.Accounts, sh1Addr, 0, big.NewInt(1000000000)) @@ -66,14 +70,18 @@ func TestSystemAccountLiquidityAfterCrossShardTransferAndBurn(t *testing.T) { } func TestSystemAccountLiquidityAfterNFTWipe(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tokenID := []byte("MYNFT-0a0a0a") sh0Addr := bytes.Repeat([]byte{1}, 31) sh0Addr = append(sh0Addr, 0) - sh0Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + sh0Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer sh0Context.Close() - metaContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + metaContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}, 1) require.Nil(t, err) defer metaContext.Close() @@ -112,14 +120,18 @@ func TestSystemAccountLiquidityAfterNFTWipe(t *testing.T) { } func TestSystemAccountLiquidityAfterSFTWipe(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tokenID := []byte("MYSFT-0a0a0a") sh0Addr := bytes.Repeat([]byte{1}, 31) sh0Addr = append(sh0Addr, 0) - sh0Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + sh0Context, err := 
vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer sh0Context.Close() - metaContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + metaContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}, 1) require.Nil(t, err) defer metaContext.Close() diff --git a/integrationTests/vm/txsFee/multiShard/esdt_test.go b/integrationTests/vm/txsFee/multiShard/esdt_test.go index f224b528ef6..9dd828eb8c1 100644 --- a/integrationTests/vm/txsFee/multiShard/esdt_test.go +++ b/integrationTests/vm/txsFee/multiShard/esdt_test.go @@ -16,7 +16,11 @@ import ( ) func TestESDTTransferShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -46,6 +50,10 @@ func TestESDTTransferShouldWork(t *testing.T) { } func TestMultiESDTNFTTransferViaRelayedV2(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + tokenID1 := []byte("MYNFT1") tokenID2 := []byte("MYNFT2") sh0Addr := []byte("12345678901234567890123456789010") @@ -53,11 +61,11 @@ func TestMultiESDTNFTTransferViaRelayedV2(t *testing.T) { relayerSh0 := []byte("12345678901234567890123456789110") relayerSh1 := []byte("12345678901234567890123456789111") - sh0Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + sh0Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer sh0Context.Close() - sh1Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + sh1Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer sh1Context.Close() _, _ = vm.CreateAccount(sh1Context.Accounts, sh1Addr, 0, big.NewInt(10000000000)) diff --git a/integrationTests/vm/txsFee/multiShard/moveBalance_test.go b/integrationTests/vm/txsFee/multiShard/moveBalance_test.go index 41e404d4af7..dcf42bce5b9 100644 --- a/integrationTests/vm/txsFee/multiShard/moveBalance_test.go +++ b/integrationTests/vm/txsFee/multiShard/moveBalance_test.go @@ -14,7 +14,11 @@ import ( ) func TestMoveBalanceShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -49,9 +53,11 @@ func TestMoveBalanceShouldWork(t *testing.T) { } func TestMoveBalanceContractAddressDataFieldNilShouldConsumeGas(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } - testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -89,9 +95,11 @@ func TestMoveBalanceContractAddressDataFieldNilShouldConsumeGas(t *testing.T) { } func TestMoveBalanceContractAddressDataFieldNotNilShouldConsumeGas(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } - testContext, err := 
vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -129,11 +137,15 @@ func TestMoveBalanceContractAddressDataFieldNotNilShouldConsumeGas(t *testing.T) } func TestMoveBalanceExecuteOneSourceAndDestinationShard(t *testing.T) { - testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextSource.Close() - testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextDst.Close() diff --git a/integrationTests/vm/txsFee/multiShard/nftTransferUpdate_test.go b/integrationTests/vm/txsFee/multiShard/nftTransferUpdate_test.go index 3a0b19b0b24..4a15002f5c0 100644 --- a/integrationTests/vm/txsFee/multiShard/nftTransferUpdate_test.go +++ b/integrationTests/vm/txsFee/multiShard/nftTransferUpdate_test.go @@ -15,6 +15,10 @@ import ( ) func TestNFTTransferAndUpdateOnOldTypeToken(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ CheckCorrectTokenIDForTransferRoleEnableEpoch: 3, DisableExecByCallerEnableEpoch: 3, @@ -36,11 +40,11 @@ func TestNFTTransferAndUpdateOnOldTypeToken(t *testing.T) { initialAttribute := []byte("initial attribute") newAttribute := []byte("new attribute") - sh0Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs) + sh0Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs, 1) require.Nil(t, err) defer sh0Context.Close() - sh1Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs) + sh1Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs, 1) require.Nil(t, err) defer sh1Context.Close() diff --git a/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go b/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go index a97a5bfd7fe..49a0e256483 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedBuiltInFunctions_test.go @@ -15,16 +15,16 @@ import ( ) func TestRelayedBuiltInFunctionExecuteOnRelayerAndDstShardShouldWork(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( 2, config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, + 1) require.Nil(t, err) defer testContextRelayer.Close() @@ -32,7 +32,8 @@ func TestRelayedBuiltInFunctionExecuteOnRelayerAndDstShardShouldWork(t *testing. 
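Note on the new trailing constructor argument: every CreatePreparedTxProcessor* call in these hunks gains a final numeric parameter (1), and in the relayedMoveBalance_test.go rewrite just below the same slot is filled with gasPriceModifier = float64(0.1), so the value appears to be a gas price modifier, with 1 keeping fees unchanged. The rewritten relayed move-balance test also lowers the expected accumulated fee from 1000 to 100, which is the old figure scaled by that 0.1 modifier. A small arithmetic sketch under that assumption, using the values from the test below:

package main

import "fmt"

func main() {
	// Values taken from the rewritten relayed move-balance test; treating the fee as
	// the old expectation scaled by the modifier is an assumption for illustration,
	// not a statement of the fee-market rules.
	innerGasLimit := uint64(100)
	gasPrice := uint64(10)
	gasPriceModifier := 0.1

	oldAccumulatedFees := innerGasLimit * gasPrice                       // 1000, the previous expectation
	newAccumulatedFees := float64(oldAccumulatedFees) * gasPriceModifier // 100, the expectation with the 0.1 modifier

	fmt.Println(oldAccumulatedFees, newAccumulatedFees)
}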
1, config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, + 1) require.Nil(t, err) defer testContextInner.Close() diff --git a/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go b/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go index 2dd36161143..2d2013fd0e8 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedMoveBalance_test.go @@ -13,363 +13,469 @@ import ( "github.com/stretchr/testify/require" ) -func TestRelayedMoveBalanceRelayerShard0InnerTxSenderAndReceiverShard1ShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() - - relayerAddr := []byte("12345678901234567890123456789030") - shardID := testContext.ShardCoordinator.ComputeId(relayerAddr) - require.Equal(t, uint32(0), shardID) - - sndAddr := []byte("12345678901234567890123456789011") - shardID = testContext.ShardCoordinator.ComputeId(sndAddr) - require.Equal(t, uint32(1), shardID) +const ( + minGasLimit = uint64(1) + gasPriceModifier = float64(0.1) +) - rcvAddr := []byte("12345678901234567890123456789021") - shardID = testContext.ShardCoordinator.ComputeId(rcvAddr) - require.Equal(t, uint32(1), shardID) +func TestRelayedMoveBalanceRelayerShard0InnerTxSenderAndReceiverShard1ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } - gasPrice := uint64(10) - gasLimit := uint64(100) + t.Run("before relayed base cost fix", testRelayedMoveBalanceRelayerShard0InnerTxSenderAndReceiverShard1ShouldWork(integrationTests.UnreachableEpoch)) +} - userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, []byte("aaaa")) +func testRelayedMoveBalanceRelayerShard0InnerTxSenderAndReceiverShard1ShouldWork(relayedFixActivationEpoch uint32) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + relayerAddr := []byte("12345678901234567890123456789030") + shardID := testContext.ShardCoordinator.ComputeId(relayerAddr) + require.Equal(t, uint32(0), shardID) - retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.Ok, retCode) - require.Nil(t, err) + sndAddr := []byte("12345678901234567890123456789011") + shardID = testContext.ShardCoordinator.ComputeId(sndAddr) + require.Equal(t, uint32(1), shardID) - _, err = testContext.Accounts.Commit() - require.Nil(t, err) + rcvAddr := []byte("12345678901234567890123456789021") + shardID = testContext.ShardCoordinator.ComputeId(rcvAddr) + require.Equal(t, uint32(1), shardID) - // check balance inner tx sender - utils.TestAccount(t, testContext.Accounts, sndAddr, 1, big.NewInt(0)) + gasPrice := uint64(10) + gasLimit := uint64(100) - // check balance inner tx receiver - utils.TestAccount(t, testContext.Accounts, rcvAddr, 0, big.NewInt(100)) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(100)) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(3000)) - // check 
accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(1000), accumulatedFees) -} + userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, []byte("aaaa")) -func TestRelayedMoveBalanceRelayerAndInnerTxSenderShard0ReceiverShard1(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := gasLimit + minGasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - relayerAddr := []byte("12345678901234567890123456789030") - shardID := testContext.ShardCoordinator.ComputeId(relayerAddr) - require.Equal(t, uint32(0), shardID) + retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) - sndAddr := []byte("12345678901234567890123456789011") - shardID = testContext.ShardCoordinator.ComputeId(sndAddr) - require.Equal(t, uint32(1), shardID) + _, err = testContext.Accounts.Commit() + require.Nil(t, err) - scAddress := "00000000000000000000dbb53e4b23392b0d6f36cce32deb2d623e9625ab3132" - scAddrBytes, _ := hex.DecodeString(scAddress) - scAddrBytes[31] = 1 - shardID = testContext.ShardCoordinator.ComputeId(scAddrBytes) - require.Equal(t, uint32(1), shardID) + // check balance inner tx sender + utils.TestAccount(t, testContext.Accounts, sndAddr, 1, big.NewInt(0)) - gasPrice := uint64(10) - gasLimit := uint64(100) + // check balance inner tx receiver + utils.TestAccount(t, testContext.Accounts, rcvAddr, 0, big.NewInt(100)) - userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddrBytes, gasPrice, gasLimit, nil) - - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - - retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.UserError, retCode) - require.Nil(t, err) + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, big.NewInt(100), accumulatedFees) + } +} - _, err = testContext.Accounts.Commit() - require.Nil(t, err) +func TestRelayedMoveBalanceRelayerAndInnerTxSenderShard0ReceiverShard1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } - // check inner tx receiver - account, err := testContext.Accounts.GetExistingAccount(scAddrBytes) - require.Nil(t, account) - require.NotNil(t, err) + t.Run("before relayed base cost fix", testRelayedMoveBalanceRelayerAndInnerTxSenderShard0ReceiverShard1(integrationTests.UnreachableEpoch)) +} - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(1000), accumulatedFees) +func testRelayedMoveBalanceRelayerAndInnerTxSenderShard0ReceiverShard1(relayedFixActivationEpoch uint32) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() + + relayerAddr := []byte("12345678901234567890123456789030") + shardID := testContext.ShardCoordinator.ComputeId(relayerAddr) + require.Equal(t, uint32(0), 
shardID) + + sndAddr := []byte("12345678901234567890123456789011") + shardID = testContext.ShardCoordinator.ComputeId(sndAddr) + require.Equal(t, uint32(1), shardID) + + scAddress := "00000000000000000000dbb53e4b23392b0d6f36cce32deb2d623e9625ab3132" + scAddrBytes, _ := hex.DecodeString(scAddress) + scAddrBytes[31] = 1 + shardID = testContext.ShardCoordinator.ComputeId(scAddrBytes) + require.Equal(t, uint32(1), shardID) + + gasPrice := uint64(10) + gasLimit := uint64(100) + + userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddrBytes, gasPrice, gasLimit, nil) + + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := gasLimit + minGasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + + retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.UserError, retCode) + require.Nil(t, err) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) + + // check inner tx receiver + account, err := testContext.Accounts.GetExistingAccount(scAddrBytes) + require.Nil(t, account) + require.NotNil(t, err) + + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, big.NewInt(100), accumulatedFees) + } } func TestRelayedMoveBalanceExecuteOnSourceAndDestination(t *testing.T) { - testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) - require.Nil(t, err) - defer testContextSource.Close() - - testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) - require.Nil(t, err) - defer testContextDst.Close() - - relayerAddr := []byte("12345678901234567890123456789030") - shardID := testContextSource.ShardCoordinator.ComputeId(relayerAddr) - require.Equal(t, uint32(0), shardID) + if testing.Short() { + t.Skip("this is not a short test") + } - sndAddr := []byte("12345678901234567890123456789011") - shardID = testContextSource.ShardCoordinator.ComputeId(sndAddr) - require.Equal(t, uint32(1), shardID) - - scAddress := "00000000000000000000dbb53e4b23392b0d6f36cce32deb2d623e9625ab3132" - scAddrBytes, _ := hex.DecodeString(scAddress) - scAddrBytes[31] = 1 - shardID = testContextSource.ShardCoordinator.ComputeId(scAddrBytes) - require.Equal(t, uint32(1), shardID) - - gasPrice := uint64(10) - gasLimit := uint64(100) - - _, _ = vm.CreateAccount(testContextSource.Accounts, relayerAddr, 0, big.NewInt(100000)) - - userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddrBytes, gasPrice, gasLimit, nil) - - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - - // execute on source shard - retCode, err := testContextSource.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.Ok, retCode) - require.Nil(t, err) - - // check relayed balance - utils.TestAccount(t, testContextSource.Accounts, relayerAddr, 1, big.NewInt(97270)) - - // check accumulated fees - accumulatedFees := testContextSource.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(1630), accumulatedFees) - - // execute on destination shard - retCode, err = testContextDst.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.UserError, retCode) - require.Nil(t, err) - - _, err = testContextDst.Accounts.Commit() - require.Nil(t, err) - - // check inner tx receiver - account, err := 
testContextDst.Accounts.GetExistingAccount(scAddrBytes) - require.Nil(t, account) - require.NotNil(t, err) + t.Run("before relayed base cost fix", testRelayedMoveBalanceExecuteOnSourceAndDestination(integrationTests.UnreachableEpoch)) +} - // check accumulated fees - accumulatedFees = testContextDst.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(1000), accumulatedFees) +func testRelayedMoveBalanceExecuteOnSourceAndDestination(relayedFixActivationEpoch uint32) func(t *testing.T) { + return func(t *testing.T) { + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContextSource.Close() + + testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContextDst.Close() + + relayerAddr := []byte("12345678901234567890123456789030") + shardID := testContextSource.ShardCoordinator.ComputeId(relayerAddr) + require.Equal(t, uint32(0), shardID) + + sndAddr := []byte("12345678901234567890123456789011") + shardID = testContextSource.ShardCoordinator.ComputeId(sndAddr) + require.Equal(t, uint32(1), shardID) + + scAddress := "00000000000000000000dbb53e4b23392b0d6f36cce32deb2d623e9625ab3132" + scAddrBytes, _ := hex.DecodeString(scAddress) + scAddrBytes[31] = 1 + shardID = testContextSource.ShardCoordinator.ComputeId(scAddrBytes) + require.Equal(t, uint32(1), shardID) + + gasPrice := uint64(10) + gasLimit := uint64(100) + + _, _ = vm.CreateAccount(testContextSource.Accounts, relayerAddr, 0, big.NewInt(100000)) + _, _ = vm.CreateAccount(testContextSource.Accounts, sndAddr, 0, big.NewInt(100)) + + userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddrBytes, gasPrice, gasLimit, nil) + + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + + // execute on source shard + retCode, err := testContextSource.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + // check relayed balance + // 100000 - rTxFee(163)*gasPrice(10) - txFeeInner(1000*gasPriceModifier(0.1)) = 98270 + utils.TestAccount(t, testContextSource.Accounts, relayerAddr, 1, big.NewInt(98270)) + + // check accumulated fees + accumulatedFees := testContextSource.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, big.NewInt(1630), accumulatedFees) + + // execute on destination shard + retCode, err = testContextDst.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.UserError, retCode) + require.Nil(t, err) + + _, err = testContextDst.Accounts.Commit() + require.Nil(t, err) + + // check inner tx receiver + account, err := testContextDst.Accounts.GetExistingAccount(scAddrBytes) + require.Nil(t, account) + require.NotNil(t, err) + + // check accumulated fees + accumulatedFees = testContextDst.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, big.NewInt(100), accumulatedFees) + } } func TestRelayedMoveBalanceExecuteOnSourceAndDestinationRelayerAndInnerTxSenderShard0InnerTxReceiverShard1ShouldWork(t *testing.T) { - testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) - require.Nil(t, err) - defer testContextSource.Close() - - testContextDst, err 
:= vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) - require.Nil(t, err) - defer testContextDst.Close() - - relayerAddr := []byte("12345678901234567890123456789030") - shardID := testContextSource.ShardCoordinator.ComputeId(relayerAddr) - require.Equal(t, uint32(0), shardID) - - sndAddr := []byte("12345678901234567890123456789010") - shardID = testContextSource.ShardCoordinator.ComputeId(sndAddr) - require.Equal(t, uint32(0), shardID) + if testing.Short() { + t.Skip("this is not a short test") + } - rcvAddr := []byte("12345678901234567890123456789011") - shardID = testContextSource.ShardCoordinator.ComputeId(rcvAddr) - require.Equal(t, uint32(1), shardID) - - gasPrice := uint64(10) - gasLimit := uint64(100) - - _, _ = vm.CreateAccount(testContextSource.Accounts, relayerAddr, 0, big.NewInt(100000)) - - userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, nil) - - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - - // execute on source shard - retCode, err := testContextSource.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.Ok, retCode) - require.Nil(t, err) - - // check relayed balance - utils.TestAccount(t, testContextSource.Accounts, relayerAddr, 1, big.NewInt(97270)) - // check inner tx sender - utils.TestAccount(t, testContextSource.Accounts, sndAddr, 1, big.NewInt(0)) - - // check accumulated fees - accumulatedFees := testContextSource.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(2630), accumulatedFees) - - // get scr for destination shard - txs := testContextSource.GetIntermediateTransactions(t) - scr := txs[0] + t.Run("before relayed base cost fix", testRelayedMoveBalanceExecuteOnSourceAndDestinationRelayerAndInnerTxSenderShard0InnerTxReceiverShard1ShouldWork(integrationTests.UnreachableEpoch)) + t.Run("after relayed base cost fix", testRelayedMoveBalanceExecuteOnSourceAndDestinationRelayerAndInnerTxSenderShard0InnerTxReceiverShard1ShouldWork(0)) +} - utils.ProcessSCRResult(t, testContextDst, scr, vmcommon.Ok, nil) +func testRelayedMoveBalanceExecuteOnSourceAndDestinationRelayerAndInnerTxSenderShard0InnerTxReceiverShard1ShouldWork(relayedFixActivationEpoch uint32) func(t *testing.T) { + return func(t *testing.T) { + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContextSource.Close() + + testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContextDst.Close() + + relayerAddr := []byte("12345678901234567890123456789030") + shardID := testContextSource.ShardCoordinator.ComputeId(relayerAddr) + require.Equal(t, uint32(0), shardID) + + sndAddr := []byte("12345678901234567890123456789010") + shardID = testContextSource.ShardCoordinator.ComputeId(sndAddr) + require.Equal(t, uint32(0), shardID) + + rcvAddr := []byte("12345678901234567890123456789011") + shardID = testContextSource.ShardCoordinator.ComputeId(rcvAddr) + require.Equal(t, uint32(1), shardID) + + gasPrice := uint64(10) + gasLimit := uint64(100) + + _, _ = vm.CreateAccount(testContextSource.Accounts, relayerAddr, 0, big.NewInt(100000)) + _, _ = 
vm.CreateAccount(testContextSource.Accounts, sndAddr, 0, big.NewInt(100)) + + userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, nil) + + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := gasLimit + minGasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + + // execute on source shard + retCode, err := testContextSource.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + // check relayed balance + // before base cost fix: 100000 - rTxFee(163)*gasPrice(10) - innerTxFee(1000*gasPriceModifier(0.1)) = 98270 + // after base cost fix: 100000 - rTxFee(163)*gasPrice(10) - innerTxFee(10) = 98360 + expectedRelayerBalance := big.NewInt(98270) + expectedAccumulatedFees := big.NewInt(1730) + if relayedFixActivationEpoch != integrationTests.UnreachableEpoch { + expectedRelayerBalance = big.NewInt(98360) + expectedAccumulatedFees = big.NewInt(1640) + } + utils.TestAccount(t, testContextSource.Accounts, relayerAddr, 1, expectedRelayerBalance) + // check inner tx sender + utils.TestAccount(t, testContextSource.Accounts, sndAddr, 1, big.NewInt(0)) + + // check accumulated fees + accumulatedFees := testContextSource.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccumulatedFees, accumulatedFees) + + // get scr for destination shard + txs := testContextSource.GetIntermediateTransactions(t) + scr := txs[0] + + utils.ProcessSCRResult(t, testContextDst, scr, vmcommon.Ok, nil) + + // check balance receiver + utils.TestAccount(t, testContextDst.Accounts, rcvAddr, 0, big.NewInt(100)) + + // check accumulated fess + accumulatedFees = testContextDst.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, big.NewInt(0), accumulatedFees) + } +} - // check balance receiver - utils.TestAccount(t, testContextDst.Accounts, rcvAddr, 0, big.NewInt(100)) +func TestRelayedMoveBalanceRelayerAndInnerTxReceiverShard0SenderShard1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } - // check accumulated fess - accumulatedFees = testContextDst.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(0), accumulatedFees) + t.Run("before relayed base cost fix", testRelayedMoveBalanceRelayerAndInnerTxReceiverShard0SenderShard1(integrationTests.UnreachableEpoch)) } -func TestRelayedMoveBalanceRelayerAndInnerTxReceiverShard0SenderShard1(t *testing.T) { - testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) - require.Nil(t, err) - defer testContextSource.Close() +func testRelayedMoveBalanceRelayerAndInnerTxReceiverShard0SenderShard1(relayedFixActivationEpoch uint32) func(t *testing.T) { + return func(t *testing.T) { + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContextSource.Close() - testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) - require.Nil(t, err) - defer testContextDst.Close() + testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContextDst.Close() - relayerAddr := []byte("12345678901234567890123456789030") - shardID := testContextSource.ShardCoordinator.ComputeId(relayerAddr) - 
require.Equal(t, uint32(0), shardID)
+ relayerAddr := []byte("12345678901234567890123456789030")
+ shardID := testContextSource.ShardCoordinator.ComputeId(relayerAddr)
+ require.Equal(t, uint32(0), shardID)
- sndAddr := []byte("12345678901234567890123456789011")
- shardID = testContextSource.ShardCoordinator.ComputeId(sndAddr)
- require.Equal(t, uint32(1), shardID)
+ sndAddr := []byte("12345678901234567890123456789011")
+ shardID = testContextSource.ShardCoordinator.ComputeId(sndAddr)
+ require.Equal(t, uint32(1), shardID)
- rcvAddr := []byte("12345678901234567890123456789010")
- shardID = testContextSource.ShardCoordinator.ComputeId(rcvAddr)
- require.Equal(t, uint32(0), shardID)
+ rcvAddr := []byte("12345678901234567890123456789010")
+ shardID = testContextSource.ShardCoordinator.ComputeId(rcvAddr)
+ require.Equal(t, uint32(0), shardID)
- gasPrice := uint64(10)
- gasLimit := uint64(100)
+ gasPrice := uint64(10)
+ gasLimit := uint64(100)
- _, _ = vm.CreateAccount(testContextSource.Accounts, relayerAddr, 0, big.NewInt(100000))
+ _, _ = vm.CreateAccount(testContextSource.Accounts, relayerAddr, 0, big.NewInt(100000))
+ _, _ = vm.CreateAccount(testContextDst.Accounts, sndAddr, 0, big.NewInt(100))
- innerTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, nil)
+ innerTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, nil)
- rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx)
- rTxGasLimit := 1 + gasLimit + uint64(len(rtxData))
- rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData)
+ rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx)
+ rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData))
+ rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData)
- // execute on relayer shard
- retCode, err := testContextSource.TxProcessor.ProcessTransaction(rtx)
- require.Equal(t, vmcommon.Ok, retCode)
- require.Nil(t, err)
+ // execute on relayer shard
+ retCode, err := testContextSource.TxProcessor.ProcessTransaction(rtx)
+ require.Equal(t, vmcommon.Ok, retCode)
+ require.Nil(t, err)
- // check relayed balance
- utils.TestAccount(t, testContextSource.Accounts, relayerAddr, 1, big.NewInt(97270))
+ // check relayed balance
+ // 100000 - rTxFee(163)*gasPrice(10) - innerTxFee(1000*gasPriceModifier(0.1)) = 98270
+ utils.TestAccount(t, testContextSource.Accounts, relayerAddr, 1, big.NewInt(98270))
- // check inner Tx receiver
- innerTxSenderAccount, err := testContextSource.Accounts.GetExistingAccount(sndAddr)
- require.Nil(t, innerTxSenderAccount)
- require.NotNil(t, err)
+ // check inner tx sender
+ innerTxSenderAccount, err := testContextSource.Accounts.GetExistingAccount(sndAddr)
+ require.Nil(t, innerTxSenderAccount)
+ require.NotNil(t, err)
- // check accumulated fees
- accumulatedFees := testContextSource.TxFeeHandler.GetAccumulatedFees()
- expectedAccFees := big.NewInt(1630)
- require.Equal(t, expectedAccFees, accumulatedFees)
+ // check accumulated fees
+ accumulatedFees := testContextSource.TxFeeHandler.GetAccumulatedFees()
+ expectedAccFees := big.NewInt(1630)
+ require.Equal(t, expectedAccFees, accumulatedFees)
- // execute on destination shard
- retCode, err = testContextDst.TxProcessor.ProcessTransaction(rtx)
- require.Equal(t, vmcommon.Ok, retCode)
- require.Nil(t, err)
+ // execute on destination shard
+ retCode, err = testContextDst.TxProcessor.ProcessTransaction(rtx)
+ require.Equal(t, vmcommon.Ok, retCode)
+
require.Nil(t, err) - utils.TestAccount(t, testContextDst.Accounts, sndAddr, 1, big.NewInt(0)) + utils.TestAccount(t, testContextDst.Accounts, sndAddr, 1, big.NewInt(0)) - // check accumulated fees - accumulatedFees = testContextDst.TxFeeHandler.GetAccumulatedFees() - expectedAccFees = big.NewInt(1000) - require.Equal(t, expectedAccFees, accumulatedFees) + // check accumulated fees + accumulatedFees = testContextDst.TxFeeHandler.GetAccumulatedFees() + expectedAccFees = big.NewInt(100) + require.Equal(t, expectedAccFees, accumulatedFees) - txs := testContextDst.GetIntermediateTransactions(t) - scr := txs[0] + txs := testContextDst.GetIntermediateTransactions(t) + scr := txs[0] - // execute generated SCR from shard1 on shard 0 - utils.ProcessSCRResult(t, testContextSource, scr, vmcommon.Ok, nil) + // execute generated SCR from shard1 on shard 0 + utils.ProcessSCRResult(t, testContextSource, scr, vmcommon.Ok, nil) - // check receiver balance - utils.TestAccount(t, testContextSource.Accounts, rcvAddr, 0, big.NewInt(100)) + // check receiver balance + utils.TestAccount(t, testContextSource.Accounts, rcvAddr, 0, big.NewInt(100)) + } } func TestMoveBalanceRelayerShard0InnerTxSenderShard1InnerTxReceiverShard2ShouldWork(t *testing.T) { - testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) - require.Nil(t, err) - defer testContextRelayer.Close() - - testContextInnerSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) - require.Nil(t, err) - defer testContextInnerSource.Close() - - testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) - require.Nil(t, err) - defer testContextDst.Close() - - relayerAddr := []byte("12345678901234567890123456789030") - shardID := testContextRelayer.ShardCoordinator.ComputeId(relayerAddr) - require.Equal(t, uint32(0), shardID) + if testing.Short() { + t.Skip("this is not a short test") + } - sndAddr := []byte("12345678901234567890123456789011") - shardID = testContextRelayer.ShardCoordinator.ComputeId(sndAddr) - require.Equal(t, uint32(1), shardID) - - rcvAddr := []byte("12345678901234567890123456789012") - shardID = testContextRelayer.ShardCoordinator.ComputeId(rcvAddr) - require.Equal(t, uint32(2), shardID) - - gasPrice := uint64(10) - gasLimit := uint64(100) - - _, _ = vm.CreateAccount(testContextRelayer.Accounts, relayerAddr, 0, big.NewInt(100000)) - - innerTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, nil) - - rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - - // execute on relayer shard - retCode, err := testContextRelayer.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.Ok, retCode) - require.Nil(t, err) - - // check relayed balance - utils.TestAccount(t, testContextRelayer.Accounts, relayerAddr, 1, big.NewInt(97270)) - - // check inner Tx receiver - innerTxSenderAccount, err := testContextRelayer.Accounts.GetExistingAccount(sndAddr) - require.Nil(t, innerTxSenderAccount) - require.NotNil(t, err) - - // check accumulated fees - accumulatedFees := testContextRelayer.TxFeeHandler.GetAccumulatedFees() - expectedAccFees := big.NewInt(1630) - require.Equal(t, expectedAccFees, accumulatedFees) - - // execute on inner tx sender shard - retCode, err = testContextInnerSource.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, 
vmcommon.Ok, retCode)
- require.Nil(t, err)
-
- utils.TestAccount(t, testContextInnerSource.Accounts, sndAddr, 1, big.NewInt(0))
-
- // check accumulated fees
- accumulatedFees = testContextInnerSource.TxFeeHandler.GetAccumulatedFees()
- expectedAccFees = big.NewInt(1000)
- require.Equal(t, expectedAccFees, accumulatedFees)
-
- // execute on inner tx receiver shard
- txs := testContextInnerSource.GetIntermediateTransactions(t)
- scr := txs[0]
-
- utils.ProcessSCRResult(t, testContextDst, scr, vmcommon.Ok, nil)
+ t.Run("before relayed base cost fix", testMoveBalanceRelayerShard0InnerTxSenderShard1InnerTxReceiverShard2ShouldWork(integrationTests.UnreachableEpoch))
+}
- // check receiver balance
- utils.TestAccount(t, testContextDst.Accounts, rcvAddr, 0, big.NewInt(100))
+func testMoveBalanceRelayerShard0InnerTxSenderShard1InnerTxReceiverShard2ShouldWork(relayedFixActivationEpoch uint32) func(t *testing.T) {
+ return func(t *testing.T) {
+ testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{
+ FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch,
+ }, gasPriceModifier)
+ require.Nil(t, err)
+ defer testContextRelayer.Close()
+
+ testContextInnerSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{
+ FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch,
+ }, gasPriceModifier)
+ require.Nil(t, err)
+ defer testContextInnerSource.Close()
+
+ testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{
+ FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch,
+ }, gasPriceModifier)
+ require.Nil(t, err)
+ defer testContextDst.Close()
+
+ relayerAddr := []byte("12345678901234567890123456789030")
+ shardID := testContextRelayer.ShardCoordinator.ComputeId(relayerAddr)
+ require.Equal(t, uint32(0), shardID)
+
+ sndAddr := []byte("12345678901234567890123456789011")
+ shardID = testContextRelayer.ShardCoordinator.ComputeId(sndAddr)
+ require.Equal(t, uint32(1), shardID)
+
+ rcvAddr := []byte("12345678901234567890123456789012")
+ shardID = testContextRelayer.ShardCoordinator.ComputeId(rcvAddr)
+ require.Equal(t, uint32(2), shardID)
+
+ gasPrice := uint64(10)
+ gasLimit := uint64(100)
+
+ _, _ = vm.CreateAccount(testContextRelayer.Accounts, relayerAddr, 0, big.NewInt(100000))
+ _, _ = vm.CreateAccount(testContextInnerSource.Accounts, sndAddr, 0, big.NewInt(100))
+
+ innerTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, nil)
+
+ rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx)
+ rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData))
+ rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData)
+
+ // execute on relayer shard
+ retCode, err := testContextRelayer.TxProcessor.ProcessTransaction(rtx)
+ require.Equal(t, vmcommon.Ok, retCode)
+ require.Nil(t, err)
+
+ // check relayed balance
+ // 100000 - rTxFee(163)*gasPrice(10) - innerTxFee(1000*gasPriceModifier(0.1)) = 98270
+ utils.TestAccount(t, testContextRelayer.Accounts, relayerAddr, 1, big.NewInt(98270))
+
+ // check inner tx sender
+ innerTxSenderAccount, err := testContextRelayer.Accounts.GetExistingAccount(sndAddr)
+ require.Nil(t, innerTxSenderAccount)
+ require.NotNil(t, err)
+
+ // check accumulated fees
+ accumulatedFees := testContextRelayer.TxFeeHandler.GetAccumulatedFees()
+ expectedAccFees := big.NewInt(1630)
+ require.Equal(t, expectedAccFees, accumulatedFees)
+
+ // execute on inner tx sender shard
+ retCode, err =
testContextInnerSource.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + utils.TestAccount(t, testContextInnerSource.Accounts, sndAddr, 1, big.NewInt(0)) + + // check accumulated fees + accumulatedFees = testContextInnerSource.TxFeeHandler.GetAccumulatedFees() + expectedAccFees = big.NewInt(100) + require.Equal(t, expectedAccFees, accumulatedFees) + + // execute on inner tx receiver shard + txs := testContextInnerSource.GetIntermediateTransactions(t) + scr := txs[0] + + utils.ProcessSCRResult(t, testContextDst, scr, vmcommon.Ok, nil) + + // check receiver balance + utils.TestAccount(t, testContextDst.Accounts, rcvAddr, 0, big.NewInt(100)) + } } diff --git a/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go b/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go index 499fbe5c6ee..de22bb57d60 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedScDeploy_test.go @@ -1,5 +1,3 @@ -//go:build !race - package multiShard import ( @@ -16,11 +14,15 @@ import ( ) func TestRelayedSCDeployShouldWork(t *testing.T) { - testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextRelayer.Close() - testContextInner, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + testContextInner, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextInner.Close() diff --git a/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go b/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go index 8e0229fef08..736783b11ae 100644 --- a/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go +++ b/integrationTests/vm/txsFee/multiShard/relayedTxScCalls_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package multiShard import ( @@ -27,19 +23,23 @@ import ( // 4. Execute SCR with the smart contract call on shard 1 // 5. 
Execute SCR with refund on relayer shard (shard 2) func TestRelayedTxScCallMultiShardShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, } - testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, enableEpochs) + testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, enableEpochs, 1) require.Nil(t, err) defer testContextRelayer.Close() - testContextInnerSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs) + testContextInnerSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs, 1) require.Nil(t, err) defer testContextInnerSource.Close() - testContextInnerDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs) + testContextInnerDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs, 1) require.Nil(t, err) defer testContextInnerDst.Close() @@ -58,14 +58,14 @@ func TestRelayedTxScCallMultiShardShouldWork(t *testing.T) { require.Equal(t, uint32(2), testContextInnerDst.ShardCoordinator.ComputeId(relayerAddr)) gasPrice := uint64(10) - gasLimit := uint64(500) + gasLimit := uint64(50000) innerTx := vm.CreateTransaction(0, big.NewInt(0), sndAddr, scAddr, gasPrice, gasLimit, []byte("increment")) rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - _, _ = vm.CreateAccount(testContextRelayer.Accounts, relayerAddr, 0, big.NewInt(10000)) + _, _ = vm.CreateAccount(testContextRelayer.Accounts, relayerAddr, 0, big.NewInt(10000000)) // execute on relayer shard retCode, err := testContextRelayer.TxProcessor.ProcessTransaction(rtx) @@ -75,12 +75,12 @@ func TestRelayedTxScCallMultiShardShouldWork(t *testing.T) { _, err = testContextRelayer.Accounts.Commit() require.Nil(t, err) - expectedBalance := big.NewInt(3130) + expectedBalance := big.NewInt(9498110) utils.TestAccount(t, testContextRelayer.Accounts, relayerAddr, 1, expectedBalance) // check accumulated fees accumulatedFees := testContextRelayer.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(1870), accumulatedFees) + require.Equal(t, big.NewInt(1890), accumulatedFees) developerFees := testContextRelayer.TxFeeHandler.GetDeveloperFees() require.Equal(t, big.NewInt(0), developerFees) @@ -115,36 +115,40 @@ func TestRelayedTxScCallMultiShardShouldWork(t *testing.T) { // check accumulated fees dest accumulatedFees = testContextInnerDst.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(3770), accumulatedFees) + require.Equal(t, big.NewInt(156630), accumulatedFees) developerFees = testContextInnerDst.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(377), developerFees) + require.Equal(t, big.NewInt(15663), developerFees) txs = testContextInnerDst.GetIntermediateTransactions(t) scr = txs[0] utils.ProcessSCRResult(t, testContextRelayer, scr, vmcommon.Ok, nil) - expectedBalance = big.NewInt(4260) + expectedBalance = big.NewInt(9841380) utils.TestAccount(t, testContextRelayer.Accounts, relayerAddr, 1, expectedBalance) // check accumulated fees accumulatedFees = testContextRelayer.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(1870), accumulatedFees) + require.Equal(t, big.NewInt(1890), accumulatedFees) developerFees = testContextRelayer.TxFeeHandler.GetDeveloperFees() 
require.Equal(t, big.NewInt(0), developerFees) } func TestRelayedTxScCallMultiShardFailOnInnerTxDst(t *testing.T) { - testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextRelayer, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextRelayer.Close() - testContextInnerSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + testContextInnerSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextInnerSource.Close() - testContextInnerDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + testContextInnerDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextInnerDst.Close() diff --git a/integrationTests/vm/txsFee/multiShard/scCallWithValueTransfer_test.go b/integrationTests/vm/txsFee/multiShard/scCallWithValueTransfer_test.go index bcb14308bab..c2a7356d1f3 100644 --- a/integrationTests/vm/txsFee/multiShard/scCallWithValueTransfer_test.go +++ b/integrationTests/vm/txsFee/multiShard/scCallWithValueTransfer_test.go @@ -1,5 +1,3 @@ -//go:build !race - package multiShard import ( @@ -16,22 +14,30 @@ import ( ) func TestDeployContractAndTransferValueSCProcessorV1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testDeployContractAndTransferValue(t, 1000) } func TestDeployContractAndTransferValueSCProcessorV2(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testDeployContractAndTransferValue(t, 0) } func testDeployContractAndTransferValue(t *testing.T, scProcessorV2EnabledEpoch uint32) { - testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextSource.Close() configEnabledEpochs := config.EnableEpochs{} configEnabledEpochs.SCProcessorV2EnableEpoch = scProcessorV2EnabledEpoch - testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, configEnabledEpochs) + testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, configEnabledEpochs, 1) require.Nil(t, err) defer testContextDst.Close() diff --git a/integrationTests/vm/txsFee/multiShard/scCalls_test.go b/integrationTests/vm/txsFee/multiShard/scCalls_test.go index 42e1dc824c1..9eb2d85fbe0 100644 --- a/integrationTests/vm/txsFee/multiShard/scCalls_test.go +++ b/integrationTests/vm/txsFee/multiShard/scCalls_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package multiShard import ( @@ -9,7 +5,6 @@ import ( "testing" "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -17,15 +12,17 @@ import ( ) func TestScCallExecuteOnSourceAndDstShardShouldWork(t *testing.T) { - enableEpochs := config.EnableEpochs{ - DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, + if testing.Short() { + 
t.Skip("this is not a short test") } - testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs) + enableEpochs := config.EnableEpochs{} + + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, enableEpochs, 1) require.Nil(t, err) defer testContextSource.Close() - testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs) + testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs, 1) require.Nil(t, err) defer testContextDst.Close() @@ -42,9 +39,9 @@ func TestScCallExecuteOnSourceAndDstShardShouldWork(t *testing.T) { require.Equal(t, uint32(0), shardID) gasPrice := uint64(10) - gasLimit := uint64(500) + gasLimit := uint64(50000) - _, _ = vm.CreateAccount(testContextSource.Accounts, sndAddr, 0, big.NewInt(10000)) + _, _ = vm.CreateAccount(testContextSource.Accounts, sndAddr, 0, big.NewInt(10000000)) tx := vm.CreateTransaction(0, big.NewInt(0), sndAddr, scAddr, gasPrice, gasLimit, []byte("increment")) @@ -56,7 +53,7 @@ func TestScCallExecuteOnSourceAndDstShardShouldWork(t *testing.T) { _, err = testContextSource.Accounts.Commit() require.Nil(t, err) - expectedBalance := big.NewInt(5000) + expectedBalance := big.NewInt(9500000) vm.TestAccount(t, testContextSource.Accounts, sndAddr, 1, expectedBalance) // check accumulated fees @@ -76,10 +73,10 @@ func TestScCallExecuteOnSourceAndDstShardShouldWork(t *testing.T) { // check accumulated fees dest accumulatedFees = testContextDst.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(3770), accumulatedFees) + require.Equal(t, big.NewInt(156630), accumulatedFees) developerFees = testContextDst.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(377), developerFees) + require.Equal(t, big.NewInt(15663), developerFees) // execute sc result with gas refund txs := testContextDst.GetIntermediateTransactions(t) @@ -88,7 +85,7 @@ func TestScCallExecuteOnSourceAndDstShardShouldWork(t *testing.T) { utils.ProcessSCRResult(t, testContextSource, scr, vmcommon.Ok, nil) // check sender balance after refund - expectedBalance = big.NewInt(6130) + expectedBalance = big.NewInt(9843270) vm.TestAccount(t, testContextSource.Accounts, sndAddr, 1, expectedBalance) // check accumulated fees @@ -97,11 +94,15 @@ func TestScCallExecuteOnSourceAndDstShardShouldWork(t *testing.T) { } func TestScCallExecuteOnSourceAndDstShardInvalidOnDst(t *testing.T) { - testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextSource, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextSource.Close() - testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + testContextDst, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextDst.Close() diff --git a/integrationTests/vm/txsFee/relayedAsyncCall_test.go b/integrationTests/vm/txsFee/relayedAsyncCall_test.go index b782f318432..9b4e243ec6a 100644 --- a/integrationTests/vm/txsFee/relayedAsyncCall_test.go +++ b/integrationTests/vm/txsFee/relayedAsyncCall_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -19,6 +15,10 @@ import ( ) func TestRelayedAsyncCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short 
test") + } + senderAddr := []byte("12345678901234567890123456789011") t.Run("nonce fix is disabled, should increase the sender's nonce", func(t *testing.T) { @@ -42,7 +42,7 @@ func TestRelayedAsyncCallShouldWork(t *testing.T) { } func testRelayedAsyncCallShouldWork(t *testing.T, enableEpochs config.EnableEpochs, senderAddr []byte) *vm.VMTestContext { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(enableEpochs) + testContext, err := vm.CreatePreparedTxProcessorWithVMs(enableEpochs, 1) require.Nil(t, err) localEgldBalance := big.NewInt(100000000) diff --git a/integrationTests/vm/txsFee/relayedAsyncESDT_test.go b/integrationTests/vm/txsFee/relayedAsyncESDT_test.go index 061a884b268..bb8b05606a7 100644 --- a/integrationTests/vm/txsFee/relayedAsyncESDT_test.go +++ b/integrationTests/vm/txsFee/relayedAsyncESDT_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -18,9 +14,13 @@ import ( ) func TestRelayedAsyncESDTCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, 1) require.Nil(t, err) defer testContext.Close() @@ -69,16 +69,20 @@ func TestRelayedAsyncESDTCallShouldWork(t *testing.T) { utils.CheckESDTBalance(t, testContext, firstSCAddress, token, big.NewInt(2500)) utils.CheckESDTBalance(t, testContext, secondSCAddress, token, big.NewInt(2500)) - expectedSenderBalance := big.NewInt(94996430) + expectedSenderBalance := big.NewInt(98219900) utils.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedSenderBalance) - expectedAccumulatedFees := big.NewInt(5003570) + expectedAccumulatedFees := big.NewInt(1780100) accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() require.Equal(t, expectedAccumulatedFees, accumulatedFees) } func TestRelayedAsyncESDTCall_InvalidCallFirstContract(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -136,7 +140,11 @@ func TestRelayedAsyncESDTCall_InvalidCallFirstContract(t *testing.T) { } func TestRelayedAsyncESDTCall_InvalidOutOfGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go b/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go index dd82f276e27..7688f147729 100644 --- a/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go +++ b/integrationTests/vm/txsFee/relayedBuiltInFunctions_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -20,105 +16,140 @@ import ( ) func TestRelayedBuildInFunctionChangeOwnerCallShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs( - config.EnableEpochs{ - PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, - }) - require.Nil(t, err) - defer testContext.Close() + if testing.Short() { + 
t.Skip("this is not a short test") + } - scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") - testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) - utils.CleanAccumulatedIntermediateTransactions(t, testContext) + t.Run("before relayed base cost fix", testRelayedBuildInFunctionChangeOwnerCallShouldWork(integrationTests.UnreachableEpoch, big.NewInt(25610), big.NewInt(4390))) + t.Run("after relayed base cost fix", testRelayedBuildInFunctionChangeOwnerCallShouldWork(0, big.NewInt(24854), big.NewInt(5146))) +} - relayerAddr := []byte("12345678901234567890123456789033") - newOwner := []byte("12345678901234567890123456789112") - gasLimit := uint64(1000) +func testRelayedBuildInFunctionChangeOwnerCallShouldWork( + relayedFixActivationEpoch uint32, + expectedBalanceRelayer *big.Int, + expectedAccumulatedFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs( + config.EnableEpochs{ + PenalizedTooMuchGasEnableEpoch: integrationTests.UnreachableEpoch, + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() - txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) - innerTx := vm.CreateTransaction(1, big.NewInt(0), owner, scAddress, gasPrice, gasLimit, txData) + scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9991691, 8309, 39) + testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) + utils.CleanAccumulatedIntermediateTransactions(t, testContext) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) + relayerAddr := []byte("12345678901234567890123456789033") + newOwner := []byte("12345678901234567890123456789112") + gasLimit := uint64(1000) - rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) + txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) + innerTx := vm.CreateTransaction(1, big.NewInt(0), owner, scAddress, gasPrice, gasLimit, txData) - retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.Ok, retCode) - require.Nil(t, err) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) - _, err = testContext.Accounts.Commit() - require.Nil(t, err) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) + rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) - utils.CheckOwnerAddr(t, testContext, scAddress, newOwner) + retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) - expectedBalanceRelayer := big.NewInt(16610) - vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) + _, err = testContext.Accounts.Commit() + require.Nil(t, err) - expectedBalance := big.NewInt(88100) - vm.TestAccount(t, testContext.Accounts, owner, 2, expectedBalance) + utils.CheckOwnerAddr(t, testContext, scAddress, newOwner) - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(13390), accumulatedFees) + vm.TestAccount(t, testContext.Accounts, relayerAddr, 
1, expectedBalanceRelayer) - developerFees := testContext.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(915), developerFees) + expectedBalance := big.NewInt(9991691) + vm.TestAccount(t, testContext.Accounts, owner, 2, expectedBalance) + + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccumulatedFees, accumulatedFees) + + developerFees := testContext.TxFeeHandler.GetDeveloperFees() + require.Equal(t, big.NewInt(91), developerFees) + } } func TestRelayedBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() + if testing.Short() { + t.Skip("this is not a short test") + } - scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") - testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) - utils.CleanAccumulatedIntermediateTransactions(t, testContext) + t.Run("before relayed base cost fix", testRelayedBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(integrationTests.UnreachableEpoch, big.NewInt(25610), big.NewInt(4390))) + t.Run("after relayed base cost fix", testRelayedBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas(0, big.NewInt(25610), big.NewInt(4390))) +} - relayerAddr := []byte("12345678901234567890123456789033") - sndAddr := []byte("12345678901234567890123456789113") - newOwner := []byte("12345678901234567890123456789112") - gasLimit := uint64(1000) +func testRelayedBuildInFunctionChangeOwnerCallWrongOwnerShouldConsumeGas( + relayedFixActivationEpoch uint32, + expectedBalanceRelayer *big.Int, + expectedAccumulatedFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() - txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) - innerTx := vm.CreateTransaction(1, big.NewInt(0), sndAddr, scAddress, gasPrice, gasLimit, txData) + scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9991691, 8309, 39) + testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) + utils.CleanAccumulatedIntermediateTransactions(t, testContext) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) + relayerAddr := []byte("12345678901234567890123456789033") + sndAddr := []byte("12345678901234567890123456789113") + newOwner := []byte("12345678901234567890123456789112") + gasLimit := uint64(1000) - rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) + txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) + innerTx := vm.CreateTransaction(1, big.NewInt(0), sndAddr, scAddress, gasPrice, gasLimit, txData) - retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.UserError, retCode) - require.Equal(t, process.ErrFailedTransaction, err) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) - _, err = testContext.Accounts.Commit() - require.Nil(t, err) + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) + 
rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) - utils.CheckOwnerAddr(t, testContext, scAddress, owner) + retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, process.ErrFailedTransaction, err) - expectedBalanceRelayer := big.NewInt(16610) - vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) + _, err = testContext.Accounts.Commit() + require.Nil(t, err) - expectedBalance := big.NewInt(88100) - vm.TestAccount(t, testContext.Accounts, owner, 1, expectedBalance) + utils.CheckOwnerAddr(t, testContext, scAddress, owner) - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(13390), accumulatedFees) + vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) - developerFees := testContext.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(0), developerFees) + expectedBalance := big.NewInt(9991691) + vm.TestAccount(t, testContext.Accounts, owner, 1, expectedBalance) + + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccumulatedFees, accumulatedFees) + + developerFees := testContext.TxFeeHandler.GetDeveloperFees() + require.Equal(t, big.NewInt(0), developerFees) + } } func TestRelayedBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() - scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) utils.CleanAccumulatedIntermediateTransactions(t, testContext) @@ -132,7 +163,7 @@ func TestRelayedBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *test _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) + rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) retCode, _ := testContext.TxProcessor.ProcessTransaction(rtx) @@ -146,7 +177,7 @@ func TestRelayedBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *test expectedBalanceRelayer := big.NewInt(17330) vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) - expectedBalance := big.NewInt(88100) + expectedBalance := big.NewInt(9988100) vm.TestAccount(t, testContext.Accounts, owner, 2, expectedBalance) // check accumulated fees @@ -158,16 +189,22 @@ func TestRelayedBuildInFunctionChangeOwnerInvalidAddressShouldConsumeGas(t *test } func TestRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + t.Run("nonce fix is disabled, should increase the sender's nonce", func(t *testing.T) { testRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeGas(t, 
config.EnableEpochs{ - RelayedNonceFixEnableEpoch: 1000, + RelayedNonceFixEnableEpoch: 1000, + FixRelayedBaseCostEnableEpoch: 1000, }) }) t.Run("nonce fix is enabled, should still increase the sender's nonce", func(t *testing.T) { testRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeGas(t, config.EnableEpochs{ - RelayedNonceFixEnableEpoch: 0, + RelayedNonceFixEnableEpoch: 0, + FixRelayedBaseCostEnableEpoch: 1000, }) }) } @@ -176,11 +213,11 @@ func testRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeG t *testing.T, enableEpochs config.EnableEpochs, ) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(enableEpochs) + testContext, err := vm.CreatePreparedTxProcessorWithVMs(enableEpochs, 1) require.Nil(t, err) defer testContext.Close() - scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) utils.CleanAccumulatedIntermediateTransactions(t, testContext) @@ -194,7 +231,7 @@ func testRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeG _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) + rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) retCode, _ := testContext.TxProcessor.ProcessTransaction(rtx) @@ -208,7 +245,7 @@ func testRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeG expectedBalanceRelayer := big.NewInt(25810) vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) - expectedBalance := big.NewInt(88100) + expectedBalance := big.NewInt(9988100) vm.TestAccount(t, testContext.Accounts, owner, 2, expectedBalance) // check accumulated fees @@ -220,11 +257,15 @@ func testRelayedBuildInFunctionChangeOwnerCallInsufficientGasLimitShouldConsumeG } func TestRelayedBuildInFunctionChangeOwnerCallOutOfGasShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() - scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, owner := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) utils.CleanAccumulatedIntermediateTransactions(t, testContext) @@ -232,13 +273,13 @@ func TestRelayedBuildInFunctionChangeOwnerCallOutOfGasShouldConsumeGas(t *testin newOwner := []byte("12345678901234567890123456789112") txData := []byte(core.BuiltInFunctionChangeOwnerAddress + "@" + hex.EncodeToString(newOwner)) - gasLimit := uint64(len(txData) + 1) + gasLimit := uint64(len(txData)) + minGasLimit innerTx := vm.CreateTransaction(1, big.NewInt(0), owner, scAddress, gasPrice, gasLimit, txData) _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) + rTxGasLimit := minGasLimit + gasLimit + 
uint64(len(rtxData)) rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, owner, gasPrice, rTxGasLimit, rtxData) retCode, _ := testContext.TxProcessor.ProcessTransaction(rtx) @@ -252,7 +293,7 @@ func TestRelayedBuildInFunctionChangeOwnerCallOutOfGasShouldConsumeGas(t *testin expectedBalanceRelayer := big.NewInt(25790) vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) - expectedBalance := big.NewInt(88100) + expectedBalance := big.NewInt(9988100) vm.TestAccount(t, testContext.Accounts, owner, 2, expectedBalance) // check accumulated fees diff --git a/integrationTests/vm/txsFee/relayedDns_test.go b/integrationTests/vm/txsFee/relayedDns_test.go index e71c02622f1..389941886e7 100644 --- a/integrationTests/vm/txsFee/relayedDns_test.go +++ b/integrationTests/vm/txsFee/relayedDns_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -18,7 +14,11 @@ import ( ) func TestRelayedTxDnsTransaction_ShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/relayedESDT_test.go b/integrationTests/vm/txsFee/relayedESDT_test.go index eba6eedb384..55a7e1bde04 100644 --- a/integrationTests/vm/txsFee/relayedESDT_test.go +++ b/integrationTests/vm/txsFee/relayedESDT_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -17,91 +13,125 @@ import ( ) func TestRelayedESDTTransferShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() - - relayerAddr := []byte("12345678901234567890123456789033") - sndAddr := []byte("12345678901234567890123456789012") - rcvAddr := []byte("12345678901234567890123456789022") - - relayerBalance := big.NewInt(10000000) - localEsdtBalance := big.NewInt(100000000) - token := []byte("miiutoken") - utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, big.NewInt(0), token, 0, localEsdtBalance) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, relayerBalance) - - gasLimit := uint64(40) - innerTx := utils.CreateESDTTransferTx(0, sndAddr, rcvAddr, token, big.NewInt(100), gasPrice, gasLimit) - - rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - - retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.Ok, retCode) - require.Nil(t, err) - - _, err = testContext.Accounts.Commit() - require.Nil(t, err) - - expectedBalanceSnd := big.NewInt(99999900) - utils.CheckESDTBalance(t, testContext, sndAddr, token, expectedBalanceSnd) - - expectedReceiverBalance := big.NewInt(100) - utils.CheckESDTBalance(t, testContext, rcvAddr, token, expectedReceiverBalance) - - expectedEGLDBalance := big.NewInt(0) - utils.TestAccount(t, testContext.Accounts, sndAddr, 1, expectedEGLDBalance) + if testing.Short() { + t.Skip("this is not a short test") + } - utils.TestAccount(t, testContext.Accounts, relayerAddr, 1, big.NewInt(9997290)) - - // check accumulated fees - accumulatedFees := 
testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(2710), accumulatedFees) + t.Run("before relayed base cost fix", testRelayedESDTTransferShouldWork(integrationTests.UnreachableEpoch, big.NewInt(9997614), big.NewInt(2386))) + t.Run("after relayed base cost fix", testRelayedESDTTransferShouldWork(0, big.NewInt(9997299), big.NewInt(2701))) } -func TestTestRelayedESTTransferNotEnoughESTValueShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() - - relayerAddr := []byte("12345678901234567890123456789033") - sndAddr := []byte("12345678901234567890123456789012") - rcvAddr := []byte("12345678901234567890123456789022") - - relayerBalance := big.NewInt(10000000) - localEsdtBalance := big.NewInt(100000000) - token := []byte("miiutoken") - utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, big.NewInt(0), token, 0, localEsdtBalance) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, relayerBalance) - - gasLimit := uint64(40) - innerTx := utils.CreateESDTTransferTx(0, sndAddr, rcvAddr, token, big.NewInt(100000001), gasPrice, gasLimit) - - rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - - retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.UserError, retCode) - require.Nil(t, err) - - _, err = testContext.Accounts.Commit() - require.Nil(t, err) - - expectedBalanceSnd := big.NewInt(100000000) - utils.CheckESDTBalance(t, testContext, sndAddr, token, expectedBalanceSnd) - - expectedReceiverBalance := big.NewInt(0) - utils.CheckESDTBalance(t, testContext, rcvAddr, token, expectedReceiverBalance) +func testRelayedESDTTransferShouldWork( + relayedFixActivationEpoch uint32, + expectedRelayerBalance *big.Int, + expectedAccFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() + + relayerAddr := []byte("12345678901234567890123456789033") + sndAddr := []byte("12345678901234567890123456789012") + rcvAddr := []byte("12345678901234567890123456789022") + + relayerBalance := big.NewInt(10000000) + localEsdtBalance := big.NewInt(100000000) + token := []byte("miiutoken") + utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, big.NewInt(0), token, 0, localEsdtBalance) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, relayerBalance) + + gasLimit := uint64(40) + innerTx := utils.CreateESDTTransferTx(0, sndAddr, rcvAddr, token, big.NewInt(100), gasPrice, gasLimit) + + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) + rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + + retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) + + expectedBalanceSnd := big.NewInt(99999900) + utils.CheckESDTBalance(t, testContext, sndAddr, token, expectedBalanceSnd) + + expectedReceiverBalance := big.NewInt(100) + utils.CheckESDTBalance(t, testContext, rcvAddr, token, 
expectedReceiverBalance) + + expectedEGLDBalance := big.NewInt(0) + utils.TestAccount(t, testContext.Accounts, sndAddr, 1, expectedEGLDBalance) + + utils.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedRelayerBalance) + + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccFees, accumulatedFees) + } +} - expectedEGLDBalance := big.NewInt(0) - utils.TestAccount(t, testContext.Accounts, sndAddr, 1, expectedEGLDBalance) +func TestRelayedESTTransferNotEnoughESTValueShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } - utils.TestAccount(t, testContext.Accounts, relayerAddr, 1, big.NewInt(9997130)) + t.Run("before relayed base cost fix", testRelayedESTTransferNotEnoughESTValueShouldConsumeGas(integrationTests.UnreachableEpoch, big.NewInt(9997488), big.NewInt(2512))) + t.Run("after relayed base cost fix", testRelayedESTTransferNotEnoughESTValueShouldConsumeGas(0, big.NewInt(9997119), big.NewInt(2881))) +} - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(2870), accumulatedFees) +func testRelayedESTTransferNotEnoughESTValueShouldConsumeGas( + relayedFixActivationEpoch uint32, + expectedRelayerBalance *big.Int, + expectedAccFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() + + relayerAddr := []byte("12345678901234567890123456789033") + sndAddr := []byte("12345678901234567890123456789012") + rcvAddr := []byte("12345678901234567890123456789022") + + relayerBalance := big.NewInt(10000000) + localEsdtBalance := big.NewInt(100000000) + token := []byte("miiutoken") + utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, big.NewInt(0), token, 0, localEsdtBalance) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, relayerBalance) + + gasLimit := uint64(42) + innerTx := utils.CreateESDTTransferTx(0, sndAddr, rcvAddr, token, big.NewInt(100000001), gasPrice, gasLimit) + + rtxData := integrationTests.PrepareRelayedTxDataV1(innerTx) + rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, innerTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + + retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.ExecutionFailed, retCode) + require.Nil(t, err) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) + + expectedBalanceSnd := big.NewInt(100000000) + utils.CheckESDTBalance(t, testContext, sndAddr, token, expectedBalanceSnd) + + expectedReceiverBalance := big.NewInt(0) + utils.CheckESDTBalance(t, testContext, rcvAddr, token, expectedReceiverBalance) + + expectedEGLDBalance := big.NewInt(0) + utils.TestAccount(t, testContext.Accounts, sndAddr, 1, expectedEGLDBalance) + + utils.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedRelayerBalance) + + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccFees, accumulatedFees) + } } diff --git a/integrationTests/vm/txsFee/relayedMoveBalance_test.go b/integrationTests/vm/txsFee/relayedMoveBalance_test.go index 2c7e230941d..1a81602ff82 100644 --- a/integrationTests/vm/txsFee/relayedMoveBalance_test.go +++ 
b/integrationTests/vm/txsFee/relayedMoveBalance_test.go @@ -19,7 +19,13 @@ import ( ) func TestRelayedMoveBalanceShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: integrationTests.UnreachableEpoch, + }, 1) require.Nil(t, err) defer testContext.Close() @@ -28,7 +34,7 @@ func TestRelayedMoveBalanceShouldWork(t *testing.T) { rcvAddr := []byte("12345678901234567890123456789022") senderNonce := uint64(0) - senderBalance := big.NewInt(0) + senderBalance := big.NewInt(100) gasLimit := uint64(100) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -38,8 +44,8 @@ func TestRelayedMoveBalanceShouldWork(t *testing.T) { userTx := vm.CreateTransaction(senderNonce, big.NewInt(100), sndAddr, rcvAddr, gasPrice, gasLimit, []byte("aaaa")) rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + rTxGasLimit := gasLimit + minGasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) require.Equal(t, vmcommon.Ok, retCode) @@ -49,8 +55,8 @@ func TestRelayedMoveBalanceShouldWork(t *testing.T) { require.Nil(t, err) // check relayer balance - // 3000 - value(100) - gasLimit(275)*gasPrice(10) = 2850 - expectedBalanceRelayer := big.NewInt(150) + // 3000 - rTxFee(175)*gasPrice(10) + txFeeInner(1000) = 2750 + expectedBalanceRelayer := big.NewInt(250) vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) // check balance inner tx sender @@ -65,7 +71,11 @@ func TestRelayedMoveBalanceShouldWork(t *testing.T) { } func TestRelayedMoveBalanceInvalidGasLimitShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -80,7 +90,7 @@ func TestRelayedMoveBalanceInvalidGasLimitShouldConsumeGas(t *testing.T) { rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) rTxGasLimit := 2 + userTx.GasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, 1, rTxGasLimit, rtxData) + rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, 1, rTxGasLimit, rtxData) _, err = testContext.TxProcessor.ProcessTransaction(rtx) require.Equal(t, process.ErrFailedTransaction, err) @@ -97,7 +107,13 @@ func TestRelayedMoveBalanceInvalidGasLimitShouldConsumeGas(t *testing.T) { } func TestRelayedMoveBalanceInvalidUserTxShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: integrationTests.UnreachableEpoch, + }, 1) require.Nil(t, err) defer testContext.Close() @@ -105,14 +121,14 @@ func TestRelayedMoveBalanceInvalidUserTxShouldConsumeGas(t *testing.T) { sndAddr := []byte("12345678901234567890123456789012") rcvAddr := 
[]byte("12345678901234567890123456789022") - _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(100)) userTx := vm.CreateTransaction(1, big.NewInt(100), sndAddr, rcvAddr, 1, 100, []byte("aaaa")) _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(3000)) rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + userTx.GasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, 1, rTxGasLimit, rtxData) + rTxGasLimit := minGasLimit + userTx.GasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, 1, rTxGasLimit, rtxData) retcode, _ := testContext.TxProcessor.ProcessTransaction(rtx) require.Equal(t, vmcommon.UserError, retcode) @@ -120,6 +136,7 @@ func TestRelayedMoveBalanceInvalidUserTxShouldConsumeGas(t *testing.T) { _, err = testContext.Accounts.Commit() require.Nil(t, err) + // 3000 - rTxFee(179)*gasPrice(1) - innerTxFee(100) = 2721 expectedBalanceRelayer := big.NewInt(2721) vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) @@ -129,9 +146,14 @@ func TestRelayedMoveBalanceInvalidUserTxShouldConsumeGas(t *testing.T) { } func TestRelayedMoveBalanceInvalidUserTxValueShouldConsumeGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ - RelayedNonceFixEnableEpoch: 1, - }) + RelayedNonceFixEnableEpoch: 1, + FixRelayedBaseCostEnableEpoch: integrationTests.UnreachableEpoch, + }, 1) require.Nil(t, err) defer testContext.Close() @@ -139,14 +161,14 @@ func TestRelayedMoveBalanceInvalidUserTxValueShouldConsumeGas(t *testing.T) { sndAddr := []byte("12345678901234567890123456789012") rcvAddr := []byte("12345678901234567890123456789022") - _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(100)) userTx := vm.CreateTransaction(0, big.NewInt(150), sndAddr, rcvAddr, 1, 100, []byte("aaaa")) _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(3000)) rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + userTx.GasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, big.NewInt(100), relayerAddr, sndAddr, 1, rTxGasLimit, rtxData) + rTxGasLimit := minGasLimit + userTx.GasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, 1, rTxGasLimit, rtxData) retCode, _ := testContext.TxProcessor.ProcessTransaction(rtx) require.Equal(t, vmcommon.UserError, retCode) @@ -154,6 +176,7 @@ func TestRelayedMoveBalanceInvalidUserTxValueShouldConsumeGas(t *testing.T) { _, err = testContext.Accounts.Commit() require.Nil(t, err) + // 3000 - rTxFee(175)*gasPrice(1) - innerTxFee(100) = 2750 expectedBalanceRelayer := big.NewInt(2725) vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) @@ -163,9 +186,13 @@ func TestRelayedMoveBalanceInvalidUserTxValueShouldConsumeGas(t *testing.T) { } func TestRelayedMoveBalanceHigherNonce(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ RelayedNonceFixEnableEpoch: 1, - }) + }, 1) require.Nil(t, err) defer testContext.Close() @@ -215,9 +242,13 @@ func TestRelayedMoveBalanceHigherNonce(t *testing.T) { } func 
TestRelayedMoveBalanceLowerNonce(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ RelayedNonceFixEnableEpoch: 1, - }) + }, 1) require.Nil(t, err) defer testContext.Close() @@ -267,6 +298,10 @@ func TestRelayedMoveBalanceLowerNonce(t *testing.T) { } func TestRelayedMoveBalanceHigherNonceWithActivatedFixCrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + enableEpochs := config.EnableEpochs{ RelayedNonceFixEnableEpoch: 0, } @@ -277,6 +312,7 @@ func TestRelayedMoveBalanceHigherNonceWithActivatedFixCrossShard(t *testing.T) { shardCoordinator0, integrationtests.CreateMemUnit(), vm.CreateMockGasScheduleNotifier(), + 1, ) require.Nil(t, err) @@ -286,6 +322,7 @@ func TestRelayedMoveBalanceHigherNonceWithActivatedFixCrossShard(t *testing.T) { shardCoordinator1, integrationtests.CreateMemUnit(), vm.CreateMockGasScheduleNotifier(), + 1, ) require.Nil(t, err) defer testContext0.Close() @@ -329,7 +366,7 @@ func executeRelayedTransaction( ) { testContext.TxsLogsProcessor.Clean() relayerAccount := getAccount(tb, testContext, relayerAddress) - gasLimit := 1 + userTx.GasLimit + uint64(len(userTxPrepared)) + gasLimit := minGasLimit + userTx.GasLimit + uint64(len(userTxPrepared)) relayedTx := vm.CreateTransaction(relayerAccount.GetNonce(), value, relayerAddress, senderAddress, 1, gasLimit, userTxPrepared) retCode, _ := testContext.TxProcessor.ProcessTransaction(relayedTx) diff --git a/integrationTests/vm/txsFee/relayedScCalls_test.go b/integrationTests/vm/txsFee/relayedScCalls_test.go index d5e0e46179e..20ee29b02e5 100644 --- a/integrationTests/vm/txsFee/relayedScCalls_test.go +++ b/integrationTests/vm/txsFee/relayedScCalls_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -19,205 +15,290 @@ import ( ) func TestRelayedScCallShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ - DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) - require.Nil(t, err) - defer testContext.Close() + if testing.Short() { + t.Skip("this is not a short test") + } - scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") - utils.CleanAccumulatedIntermediateTransactions(t, testContext) + t.Run("before relayed base cost fix", testRelayedScCallShouldWork(integrationTests.UnreachableEpoch, big.NewInt(29982306), big.NewInt(25903), big.NewInt(1608))) + t.Run("after relayed base cost fix", testRelayedScCallShouldWork(0, big.NewInt(29982216), big.NewInt(25993), big.NewInt(1608))) +} - relayerAddr := []byte("12345678901234567890123456789033") - sndAddr := []byte("12345678901234567890123456789112") - gasLimit := uint64(1000) +func testRelayedScCallShouldWork( + relayedFixActivationEpoch uint32, + expectedRelayerBalance *big.Int, + expectedAccFees *big.Int, + expectedDevFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() - _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, 
big.NewInt(30000)) + scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9991691, 8309, 39) + utils.CleanAccumulatedIntermediateTransactions(t, testContext) - userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddress, gasPrice, gasLimit, []byte("increment")) + relayerAddr := []byte("12345678901234567890123456789033") + sndAddr := []byte("12345678901234567890123456789112") + gasLimit := uint64(100000) - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000000)) - retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.Ok, retCode) - require.Nil(t, err) + userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddress, gasPrice, gasLimit, []byte("increment")) - _, err = testContext.Accounts.Commit() - require.Nil(t, err) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - ret := vm.GetIntValueFromSC(nil, testContext.Accounts, scAddress, "get") - require.Equal(t, big.NewInt(2), ret) + retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) - expectedBalance := big.NewInt(23850) - vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalance) + _, err = testContext.Accounts.Commit() + require.Nil(t, err) - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(17950), accumulatedFees) + ret := vm.GetIntValueFromSC(nil, testContext.Accounts, scAddress, "get") + require.Equal(t, big.NewInt(2), ret) - developerFees := testContext.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(807), developerFees) + vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedRelayerBalance) + + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccFees, accumulatedFees) + + developerFees := testContext.TxFeeHandler.GetDeveloperFees() + require.Equal(t, expectedDevFees, developerFees) + } } func TestRelayedScCallContractNotFoundShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("before relayed fix", testRelayedScCallContractNotFoundShouldConsumeGas(integrationTests.UnreachableEpoch, big.NewInt(27130), big.NewInt(2870))) + t.Run("after relayed fix", testRelayedScCallContractNotFoundShouldConsumeGas(0, big.NewInt(27040), big.NewInt(2960))) +} - scAddress := "00000000000000000500dbb53e4b23392b0d6f36cce32deb2d623e9625ab3132" - scAddrBytes, _ := hex.DecodeString(scAddress) +func testRelayedScCallContractNotFoundShouldConsumeGas( + relayedFixActivationEpoch uint32, + expectedRelayerBalance *big.Int, + expectedAccFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, 
gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() - relayerAddr := []byte("12345678901234567890123456789033") - sndAddr := []byte("12345678901234567890123456789112") - gasLimit := uint64(1000) + scAddress := "00000000000000000500dbb53e4b23392b0d6f36cce32deb2d623e9625ab3132" + scAddrBytes, _ := hex.DecodeString(scAddress) - _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) + relayerAddr := []byte("12345678901234567890123456789033") + sndAddr := []byte("12345678901234567890123456789112") + gasLimit := uint64(1000) - userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddrBytes, gasPrice, gasLimit, []byte("increment")) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddrBytes, gasPrice, gasLimit, []byte("increment")) - retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.UserError, retCode) - require.Nil(t, err) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - _, err = testContext.Accounts.Commit() - require.Nil(t, err) + retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.UserError, retCode) + require.Nil(t, err) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) - expectedBalance := big.NewInt(18130) - vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalance) + vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedRelayerBalance) - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(11870), accumulatedFees) + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccFees, accumulatedFees) - developerFees := testContext.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(0), developerFees) + developerFees := testContext.TxFeeHandler.GetDeveloperFees() + require.Equal(t, big.NewInt(0), developerFees) + } } func TestRelayedScCallInvalidMethodShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() + if testing.Short() { + t.Skip("this is not a short test") + } - scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") - utils.CleanAccumulatedIntermediateTransactions(t, testContext) + t.Run("before relayed fix", testRelayedScCallInvalidMethodShouldConsumeGas(integrationTests.UnreachableEpoch, big.NewInt(26924), big.NewInt(11385))) + t.Run("after relayed fix", testRelayedScCallInvalidMethodShouldConsumeGas(0, big.NewInt(26924), big.NewInt(11385))) +} - relayerAddr := []byte("12345678901234567890123456789033") - sndAddr := []byte("12345678901234567890123456789112") - gasLimit := uint64(1000) +func testRelayedScCallInvalidMethodShouldConsumeGas( + relayedFixActivationEpoch uint32, + expectedRelayerBalance 
*big.Int, + expectedAccFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + RelayedNonceFixEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() - _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) + scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9991691, 8309, 39) + utils.CleanAccumulatedIntermediateTransactions(t, testContext) - userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddress, gasPrice, gasLimit, []byte("invalidMethod")) + relayerAddr := []byte("12345678901234567890123456789033") + sndAddr := []byte("12345678901234567890123456789112") + gasLimit := uint64(1000) - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) - retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.UserError, retCode) - require.Nil(t, err) + userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddress, gasPrice, gasLimit, []byte("invalidMethod")) - _, err = testContext.Accounts.Commit() - require.Nil(t, err) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - expectedBalance := big.NewInt(18050) - vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalance) + retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.UserError, retCode) + require.Nil(t, err) - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(23850), accumulatedFees) + _, err = testContext.Accounts.Commit() + require.Nil(t, err) - developerFees := testContext.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(399), developerFees) + vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedRelayerBalance) + + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccFees, accumulatedFees) + + developerFees := testContext.TxFeeHandler.GetDeveloperFees() + require.Equal(t, big.NewInt(39), developerFees) + } } func TestRelayedScCallInsufficientGasLimitShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() + if testing.Short() { + t.Skip("this is not a short test") + } - scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") - utils.CleanAccumulatedIntermediateTransactions(t, testContext) + t.Run("before relayed fix", testRelayedScCallInsufficientGasLimitShouldConsumeGas(integrationTests.UnreachableEpoch, big.NewInt(28140), big.NewInt(10169))) + t.Run("after relayed fix", testRelayedScCallInsufficientGasLimitShouldConsumeGas(0, big.NewInt(28050), big.NewInt(10259))) +} - relayerAddr := []byte("12345678901234567890123456789033") - sndAddr := 
[]byte("12345678901234567890123456789112") - gasLimit := uint64(5) +func testRelayedScCallInsufficientGasLimitShouldConsumeGas( + relayedFixActivationEpoch uint32, + expectedBalance *big.Int, + expectedAccumulatedFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() - _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) + scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9991691, 8309, 39) + utils.CleanAccumulatedIntermediateTransactions(t, testContext) - userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddress, gasPrice, gasLimit, []byte("increment")) + relayerAddr := []byte("12345678901234567890123456789033") + sndAddr := []byte("12345678901234567890123456789112") + data := "increment" + gasLimit := minGasLimit + uint64(len(data)) - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) - retCode, _ := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.UserError, retCode) + userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddress, gasPrice, gasLimit, []byte(data)) - _, err = testContext.Accounts.Commit() - require.Nil(t, err) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + + retCode, _ := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.UserError, retCode) - expectedBalance := big.NewInt(28100) - vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalance) + _, err = testContext.Accounts.Commit() + require.Nil(t, err) - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(13800), accumulatedFees) + vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalance) - developerFees := testContext.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(399), developerFees) + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccumulatedFees, accumulatedFees) + + developerFees := testContext.TxFeeHandler.GetDeveloperFees() + require.Equal(t, big.NewInt(39), developerFees) + } } func TestRelayedScCallOutOfGasShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() + if testing.Short() { + t.Skip("this is not a short test") + } - scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") - utils.CleanAccumulatedIntermediateTransactions(t, testContext) + t.Run("before relayed fix", testRelayedScCallOutOfGasShouldConsumeGas(integrationTests.UnreachableEpoch, big.NewInt(28040), big.NewInt(10269))) + t.Run("after relayed fix", 
testRelayedScCallOutOfGasShouldConsumeGas(0, big.NewInt(28040), big.NewInt(10269))) +} - relayerAddr := []byte("12345678901234567890123456789033") - sndAddr := []byte("12345678901234567890123456789112") - gasLimit := uint64(20) +func testRelayedScCallOutOfGasShouldConsumeGas( + relayedFixActivationEpoch uint32, + expectedRelayerBalance *big.Int, + expectedAccFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + RelayedNonceFixEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() - _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) + scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9991691, 8309, 39) + utils.CleanAccumulatedIntermediateTransactions(t, testContext) - userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddress, gasPrice, gasLimit, []byte("increment")) + relayerAddr := []byte("12345678901234567890123456789033") + sndAddr := []byte("12345678901234567890123456789112") + gasLimit := uint64(20) - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, big.NewInt(0)) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(30000)) - retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.UserError, retCode) - require.Nil(t, err) + userTx := vm.CreateTransaction(0, big.NewInt(100), sndAddr, scAddress, gasPrice, gasLimit, []byte("increment")) - _, err = testContext.Accounts.Commit() - require.Nil(t, err) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + + retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.UserError, retCode) + require.Nil(t, err) - expectedBalance := big.NewInt(27950) - vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalance) + _, err = testContext.Accounts.Commit() + require.Nil(t, err) - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(13950), accumulatedFees) + vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedRelayerBalance) - developerFees := testContext.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(399), developerFees) + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccFees, accumulatedFees) + + developerFees := testContext.TxFeeHandler.GetDeveloperFees() + require.Equal(t, big.NewInt(39), developerFees) + } } func TestRelayedDeployInvalidContractShouldIncrementNonceOnSender(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddr := []byte("12345678901234567890123456789011") t.Run("nonce fix is disabled, should increase the sender's nonce if inner tx has correct nonce", func(t *testing.T) { @@ -261,7 +342,7 @@ func testRelayedDeployInvalidContractShouldIncrementNonceOnSender( senderAddr []byte, senderNonce uint64, ) *vm.VMTestContext { - 
testContext, err := vm.CreatePreparedTxProcessorWithVMs(enableEpochs) + testContext, err := vm.CreatePreparedTxProcessorWithVMs(enableEpochs, 1) require.Nil(t, err) relayerAddr := []byte("12345678901234567890123456789033") @@ -274,7 +355,7 @@ func testRelayedDeployInvalidContractShouldIncrementNonceOnSender( userTx := vm.CreateTransaction(senderNonce, big.NewInt(100), senderAddr, emptyAddress, gasPrice, gasLimit, nil) rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) + rTxGasLimit := minGasLimit + gasLimit + uint64(len(rtxData)) rtx := vm.CreateTransaction(0, userTx.Value, relayerAddr, senderAddr, gasPrice, rTxGasLimit, rtxData) retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) diff --git a/integrationTests/vm/txsFee/relayedScDeploy_test.go b/integrationTests/vm/txsFee/relayedScDeploy_test.go index 8a8f7f52d8c..21bd43df7e2 100644 --- a/integrationTests/vm/txsFee/relayedScDeploy_test.go +++ b/integrationTests/vm/txsFee/relayedScDeploy_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -17,161 +13,227 @@ import ( ) func TestRelayedScDeployShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("before relayed fix", testRelayedScDeployShouldWork(integrationTests.UnreachableEpoch, big.NewInt(20170), big.NewInt(29830))) + t.Run("after relayed fix", testRelayedScDeployShouldWork(0, big.NewInt(8389), big.NewInt(41611))) +} - relayerAddr := []byte("12345678901234567890123456789033") - sndAddr := []byte("12345678901234567890123456789012") +func testRelayedScDeployShouldWork( + relayedFixActivationEpoch uint32, + expectedRelayerBalance *big.Int, + expectedAccFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() - senderNonce := uint64(0) - senderBalance := big.NewInt(0) - gasLimit := uint64(1962) + relayerAddr := []byte("12345678901234567890123456789033") + sndAddr := []byte("12345678901234567890123456789012") - _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(50000)) + senderNonce := uint64(0) + senderBalance := big.NewInt(0) + gasLimit := uint64(2000) - scCode := wasm.GetSCCode("../wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm") - userTx := vm.CreateTransaction(senderNonce, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, []byte(wasm.CreateDeployTxData(scCode))) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(50000)) - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + scCode := wasm.GetSCCode("../wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm") + userTx := vm.CreateTransaction(senderNonce, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, []byte(wasm.CreateDeployTxData(scCode))) - retCode, err := 
testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.Ok, retCode) - require.Nil(t, err) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - _, err = testContext.Accounts.Commit() - require.Nil(t, err) + retCode, err := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.Ok, retCode) + require.Nil(t, err) - expectedBalanceRelayer := big.NewInt(2530) - vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) + _, err = testContext.Accounts.Commit() + require.Nil(t, err) - // check balance inner tx sender - vm.TestAccount(t, testContext.Accounts, sndAddr, 1, big.NewInt(0)) + vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedRelayerBalance) - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(47470), accumulatedFees) + // check balance inner tx sender + vm.TestAccount(t, testContext.Accounts, sndAddr, 1, big.NewInt(0)) + + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccFees, accumulatedFees) + } } func TestRelayedScDeployInvalidCodeShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("before relayed fix", testRelayedScDeployInvalidCodeShouldConsumeGas(integrationTests.UnreachableEpoch, big.NewInt(20716), big.NewInt(29284))) + t.Run("after relayed fix", testRelayedScDeployInvalidCodeShouldConsumeGas(0, big.NewInt(8890), big.NewInt(41110))) +} - relayerAddr := []byte("12345678901234567890123456789033") - sndAddr := []byte("12345678901234567890123456789012") +func testRelayedScDeployInvalidCodeShouldConsumeGas( + relayedFixActivationEpoch uint32, + expectedBalance *big.Int, + expectedAccumulatedFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() - senderNonce := uint64(0) - senderBalance := big.NewInt(0) - gasLimit := uint64(500) + relayerAddr := []byte("12345678901234567890123456789033") + sndAddr := []byte("12345678901234567890123456789012") - _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(50000)) + senderNonce := uint64(0) + senderBalance := big.NewInt(0) - scCode := wasm.GetSCCode("../wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm") - scCodeBytes := []byte(wasm.CreateDeployTxData(scCode)) - scCodeBytes = append(scCodeBytes, []byte("aaaaa")...) 
- userTx := vm.CreateTransaction(senderNonce, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, scCodeBytes) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(50000)) - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + scCode := wasm.GetSCCode("../wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm") + scCodeBytes := []byte(wasm.CreateDeployTxData(scCode)) + scCodeBytes = append(scCodeBytes, []byte("aaaaa")...) + gasLimit := minGasLimit + uint64(len(scCodeBytes)) + userTx := vm.CreateTransaction(senderNonce, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, scCodeBytes) - retCode, _ := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.UserError, retCode) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - _, err = testContext.Accounts.Commit() - require.Nil(t, err) + retCode, _ := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.UserError, retCode) - expectedBalanceRelayer := big.NewInt(17030) - vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) + _, err = testContext.Accounts.Commit() + require.Nil(t, err) - // check balance inner tx sender - vm.TestAccount(t, testContext.Accounts, sndAddr, 1, big.NewInt(0)) + vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalance) - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(32970), accumulatedFees) + // check balance inner tx sender + vm.TestAccount(t, testContext.Accounts, sndAddr, 1, big.NewInt(0)) + + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccumulatedFees, accumulatedFees) + } } func TestRelayedScDeployInsufficientGasLimitShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("before relayed fix", testRelayedScDeployInsufficientGasLimitShouldConsumeGas(integrationTests.UnreachableEpoch, big.NewInt(20821), big.NewInt(29179))) + t.Run("after relayed fix", testRelayedScDeployInsufficientGasLimitShouldConsumeGas(0, big.NewInt(9040), big.NewInt(40960))) +} + +func testRelayedScDeployInsufficientGasLimitShouldConsumeGas( + relayedFixActivationEpoch uint32, + expectedBalance *big.Int, + expectedAccumulatedFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() - relayerAddr := []byte("12345678901234567890123456789033") - sndAddr := []byte("12345678901234567890123456789012") + relayerAddr := []byte("12345678901234567890123456789033") + sndAddr := []byte("12345678901234567890123456789012") - senderNonce := uint64(0) - senderBalance := big.NewInt(0) - gasLimit := uint64(500) + senderNonce := uint64(0) + senderBalance := big.NewInt(0) - _, _ = 
vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(50000)) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(50000)) - scCode := wasm.GetSCCode("../wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm") - userTx := vm.CreateTransaction(senderNonce, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, []byte(wasm.CreateDeployTxData(scCode))) + scCode := wasm.GetSCCode("../wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm") + data := wasm.CreateDeployTxData(scCode) + gasLimit := minGasLimit + uint64(len(data)) + userTx := vm.CreateTransaction(senderNonce, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, []byte(data)) - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - retCode, _ := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.UserError, retCode) + retCode, _ := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.UserError, retCode) - _, err = testContext.Accounts.Commit() - require.Nil(t, err) + _, err = testContext.Accounts.Commit() + require.Nil(t, err) - expectedBalanceRelayer := big.NewInt(17130) - vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) + vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalance) - // check balance inner tx sender - vm.TestAccount(t, testContext.Accounts, sndAddr, 1, big.NewInt(0)) + // check balance inner tx sender + vm.TestAccount(t, testContext.Accounts, sndAddr, 1, big.NewInt(0)) - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(32870), accumulatedFees) + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccumulatedFees, accumulatedFees) + } } func TestRelayedScDeployOutOfGasShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) - require.Nil(t, err) - defer testContext.Close() + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("before relayed fix", testRelayedScDeployOutOfGasShouldConsumeGas(integrationTests.UnreachableEpoch, big.NewInt(20821), big.NewInt(29179))) + t.Run("after relayed fix", testRelayedScDeployOutOfGasShouldConsumeGas(0, big.NewInt(9040), big.NewInt(40960))) +} + +func testRelayedScDeployOutOfGasShouldConsumeGas( + relayedFixActivationEpoch uint32, + expectedBalance *big.Int, + expectedAccumulatedFees *big.Int, +) func(t *testing.T) { + return func(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ + FixRelayedBaseCostEnableEpoch: relayedFixActivationEpoch, + }, gasPriceModifier) + require.Nil(t, err) + defer testContext.Close() - relayerAddr := []byte("12345678901234567890123456789033") - sndAddr := []byte("12345678901234567890123456789012") + relayerAddr := []byte("12345678901234567890123456789033") + sndAddr := []byte("12345678901234567890123456789012") - senderNonce := uint64(0) 
- senderBalance := big.NewInt(0) - gasLimit := uint64(570) + senderNonce := uint64(0) + senderBalance := big.NewInt(0) - _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) - _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(50000)) + _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) + _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(50000)) - scCode := wasm.GetSCCode("../wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm") - userTx := vm.CreateTransaction(senderNonce, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, []byte(wasm.CreateDeployTxData(scCode))) + scCode := wasm.GetSCCode("../wasm/testdata/misc/fib_wasm/output/fib_wasm.wasm") + data := wasm.CreateDeployTxData(scCode) + gasLimit := minGasLimit + uint64(len(data)) + userTx := vm.CreateTransaction(senderNonce, big.NewInt(0), sndAddr, vm.CreateEmptyAddress(), gasPrice, gasLimit, []byte(data)) - rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) - rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) - rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) + rtxData := integrationTests.PrepareRelayedTxDataV1(userTx) + rTxGasLimit := 1 + gasLimit + uint64(len(rtxData)) + rtx := vm.CreateTransaction(0, big.NewInt(0), relayerAddr, sndAddr, gasPrice, rTxGasLimit, rtxData) - code, err := testContext.TxProcessor.ProcessTransaction(rtx) - require.Equal(t, vmcommon.UserError, code) - require.Nil(t, err) + code, err := testContext.TxProcessor.ProcessTransaction(rtx) + require.Equal(t, vmcommon.UserError, code) + require.Nil(t, err) - _, err = testContext.Accounts.Commit() - require.Nil(t, err) + _, err = testContext.Accounts.Commit() + require.Nil(t, err) - expectedBalanceRelayer := big.NewInt(16430) - vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalanceRelayer) + vm.TestAccount(t, testContext.Accounts, relayerAddr, 1, expectedBalance) - // check balance inner tx sender - vm.TestAccount(t, testContext.Accounts, sndAddr, 1, big.NewInt(0)) + // check balance inner tx sender + vm.TestAccount(t, testContext.Accounts, sndAddr, 1, big.NewInt(0)) - // check accumulated fees - accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(33570), accumulatedFees) + // check accumulated fees + accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() + require.Equal(t, expectedAccumulatedFees, accumulatedFees) + } } diff --git a/integrationTests/vm/txsFee/scCalls_test.go b/integrationTests/vm/txsFee/scCalls_test.go index db01a33cd11..c17474eb9e3 100644 --- a/integrationTests/vm/txsFee/scCalls_test.go +++ b/integrationTests/vm/txsFee/scCalls_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -23,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" @@ -60,7 +57,6 @@ func prepareTestContextForEpoch836(tb testing.TB) (*vm.VMTestContext, []byte) { 
testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGasAndRoundConfig( config.EnableEpochs{ GovernanceEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, MaxBlockchainHookCountersEnableEpoch: unreachableEpoch, @@ -69,7 +65,8 @@ func prepareTestContextForEpoch836(tb testing.TB) (*vm.VMTestContext, []byte) { mock.NewMultiShardsCoordinatorMock(2), db, gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), + 1, ) require.Nil(tb, err) @@ -90,18 +87,22 @@ func prepareTestContextForEpoch836(tb testing.TB) (*vm.VMTestContext, []byte) { } func TestScCallShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, 1) require.Nil(t, err) defer testContext.Close() - scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) utils.CleanAccumulatedIntermediateTransactions(t, testContext) sndAddr := []byte("12345678901234567890123456789112") - senderBalance := big.NewInt(100000) - gasLimit := uint64(1000) + senderBalance := big.NewInt(1000000000) + gasLimit := uint64(100000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) @@ -109,7 +110,7 @@ func TestScCallShouldWork(t *testing.T) { tx := vm.CreateTransaction(idx, big.NewInt(0), sndAddr, scAddress, gasPrice, gasLimit, []byte("increment")) calculatedGasLimit := vm.ComputeGasLimit(nil, testContext, tx) - require.Equal(t, uint64(418), calculatedGasLimit) + require.Equal(t, uint64(15704), calculatedGasLimit) returnCode, errProcess := testContext.TxProcessor.ProcessTransaction(tx) require.Nil(t, errProcess) @@ -122,19 +123,23 @@ func TestScCallShouldWork(t *testing.T) { ret := vm.GetIntValueFromSC(nil, testContext.Accounts, scAddress, "get") require.Equal(t, big.NewInt(11), ret) - expectedBalance := big.NewInt(58200) + expectedBalance := big.NewInt(998429600) vm.TestAccount(t, testContext.Accounts, sndAddr, 10, expectedBalance) // check accumulated fees accumulatedFees := testContext.TxFeeHandler.GetAccumulatedFees() - require.Equal(t, big.NewInt(53700), accumulatedFees) + require.Equal(t, big.NewInt(1582300), accumulatedFees) developerFees := testContext.TxFeeHandler.GetDeveloperFees() - require.Equal(t, big.NewInt(4479), developerFees) + require.Equal(t, big.NewInt(157339), developerFees) } func TestScCallContractNotFoundShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -163,11 +168,15 @@ func TestScCallContractNotFoundShouldConsumeGas(t *testing.T) { } func TestScCallInvalidMethodToCallShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() - scAddress, _ := 
utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) utils.CleanAccumulatedIntermediateTransactions(t, testContext) sndAddr := []byte("12345678901234567890123456789112") @@ -196,11 +205,15 @@ func TestScCallInvalidMethodToCallShouldConsumeGas(t *testing.T) { } func TestScCallInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() - scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) sndAddr := []byte("12345678901234567890123456789112") senderBalance := big.NewInt(100000) @@ -230,11 +243,15 @@ func TestScCallInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { } func TestScCallOutOfGasShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() - scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) utils.CleanAccumulatedIntermediateTransactions(t, testContext) sndAddr := []byte("12345678901234567890123456789112") @@ -263,20 +280,24 @@ func TestScCallOutOfGasShouldConsumeGas(t *testing.T) { } func TestScCallAndGasChangeShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, - }) + }, 1) require.Nil(t, err) defer testContext.Close() mockGasSchedule := testContext.GasSchedule.(*mock.GasScheduleNotifierMock) - scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm") + scAddress, _ := utils.DoDeploy(t, testContext, "../wasm/testdata/counter/output/counter.wasm", 9988100, 11900, 399) utils.CleanAccumulatedIntermediateTransactions(t, testContext) sndAddr := []byte("12345678901234567890123456789112") senderBalance := big.NewInt(10000000) - gasLimit := uint64(1000) + gasLimit := uint64(100000) _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) numIterations := uint64(10) @@ -308,7 +329,11 @@ func TestScCallAndGasChangeShouldWork(t *testing.T) { } func TestESDTScCallAndGasChangeShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -368,7 +393,6 @@ func prepareTestContextForEpoch460(tb testing.TB) (*vm.VMTestContext, []byte) { testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ GovernanceEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, ScheduledMiniBlocksEnableEpoch: 
unreachableEpoch, CorrectJailedNotUnstakedEmptyQueueEpoch: unreachableEpoch, OptimizeNFTStoreEnableEpoch: unreachableEpoch, @@ -398,7 +422,7 @@ func prepareTestContextForEpoch460(tb testing.TB) (*vm.VMTestContext, []byte) { RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, RuntimeMemStoreLimitEnableEpoch: unreachableEpoch, MaxBlockchainHookCountersEnableEpoch: unreachableEpoch, - }) + }, 1) require.Nil(tb, err) senderBalance := big.NewInt(1000000000000000000) @@ -419,6 +443,10 @@ func prepareTestContextForEpoch460(tb testing.TB) (*vm.VMTestContext, []byte) { } func TestScCallBuyNFT_OneFailedTxAndOneOkTx(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, scAddress := prepareTestContextForEpoch460(t) defer testContext.Close() @@ -488,6 +516,10 @@ func TestScCallBuyNFT_OneFailedTxAndOneOkTx(t *testing.T) { } func TestScCallBuyNFT_TwoOkTxs(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, scAddress := prepareTestContextForEpoch460(t) defer testContext.Close() @@ -557,6 +589,10 @@ func TestScCallBuyNFT_TwoOkTxs(t *testing.T) { } func TestScCallDistributeStakingRewards_ShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testContext, scAddress := prepareTestContextForEpoch836(t) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/scDeploy_test.go b/integrationTests/vm/txsFee/scDeploy_test.go index 875fde2fe58..ea646e6db73 100644 --- a/integrationTests/vm/txsFee/scDeploy_test.go +++ b/integrationTests/vm/txsFee/scDeploy_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package txsFee import ( @@ -17,7 +13,11 @@ import ( ) func TestScDeployShouldWork(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -48,7 +48,11 @@ func TestScDeployShouldWork(t *testing.T) { } func TestScDeployInvalidContractCodeShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -80,7 +84,11 @@ func TestScDeployInvalidContractCodeShouldConsumeGas(t *testing.T) { } func TestScDeployInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() @@ -111,7 +119,11 @@ func TestScDeployInsufficientGasLimitShouldNotConsumeGas(t *testing.T) { } func TestScDeployOutOfGasShouldConsumeGas(t *testing.T) { - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/utils/utils.go b/integrationTests/vm/txsFee/utils/utils.go index 40f91cbe1c9..bc7abfbeaf0 100644 --- a/integrationTests/vm/txsFee/utils/utils.go +++ 
b/integrationTests/vm/txsFee/utils/utils.go @@ -37,8 +37,11 @@ func DoDeploy( t *testing.T, testContext *vm.VMTestContext, pathToContract string, + expectedBalance, + expectedAccFees, + expectedDevFees int64, ) (scAddr []byte, owner []byte) { - return doDeployInternal(t, testContext, pathToContract, 88100, 11900, 399) + return doDeployInternal(t, testContext, pathToContract, expectedBalance, expectedAccFees, expectedDevFees) } // DoDeployOldCounter - @@ -47,7 +50,7 @@ func DoDeployOldCounter( testContext *vm.VMTestContext, pathToContract string, ) (scAddr []byte, owner []byte) { - return doDeployInternal(t, testContext, pathToContract, 89030, 10970, 368) + return doDeployInternal(t, testContext, pathToContract, 9989030, 10970, 368) } func doDeployInternal( @@ -58,7 +61,7 @@ func doDeployInternal( ) (scAddr []byte, owner []byte) { owner = []byte("12345678901234567890123456789011") senderNonce := uint64(0) - senderBalance := big.NewInt(100000) + senderBalance := big.NewInt(10000000) gasPrice := uint64(10) gasLimit := uint64(2000) diff --git a/integrationTests/vm/txsFee/validatorSC_test.go b/integrationTests/vm/txsFee/validatorSC_test.go index 31fbaea8dae..c54025a90b1 100644 --- a/integrationTests/vm/txsFee/validatorSC_test.go +++ b/integrationTests/vm/txsFee/validatorSC_test.go @@ -10,12 +10,12 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" vmAddr "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -28,6 +28,9 @@ const ( validatorStakeData = "stake@01@" + validatorBLSKey + "@0b823739887c40e9331f70c5a140623dfaf4558a9138b62f4473b26bbafdd4f58cb5889716a71c561c9e20e7a280e985@b2a11555ce521e4944e09ab17549d85b487dcd26c84b5017a39e31a3670889ba" cannotUnBondTokensMessage = "cannot unBond tokens, the validator would remain without min deposit, nodes are still active" noTokensToUnBondMessage = "no tokens that can be unbond at this time" + delegationManagementKey = "delegationManagement" + stakingV4Step1EnableEpoch = 4443 + stakingV4Step2EnableEpoch = 4444 ) var ( @@ -36,8 +39,6 @@ var ( value200EGLD, _ = big.NewInt(0).SetString("200000000000000000000", 10) ) -const delegationManagementKey = "delegationManagement" - func saveDelegationManagerConfig(testContext *vm.VMTestContext) { acc, _ := testContext.Accounts.LoadAccount(vmAddr.DelegationManagerSCAddress) userAcc, _ := acc.(state.UserAccountHandler) @@ -49,12 +50,16 @@ func saveDelegationManagerConfig(testContext *vm.VMTestContext) { } func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondShouldRefund(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + 
testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) saveDelegationManagerConfig(testContextMeta) @@ -105,12 +110,23 @@ func checkReturnLog(t *testing.T, testContextMeta *vm.VMTestContext, subStr stri } func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondTokensShouldRefund(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( + core.MetachainShardId, + config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + }, + 1, + ) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) @@ -135,24 +151,32 @@ func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondTokensShouldRefund(t *tes } func TestValidatorsSC_DoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + argUnbondTokensV1 := config.EnableEpochs{ UnbondTokensV2EnableEpoch: 20000, + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, } testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t, argUnbondTokensV1) argUnbondTokensV2 := config.EnableEpochs{ UnbondTokensV2EnableEpoch: 0, + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, } testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t, argUnbondTokensV2) } func testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *testing.T, enableEpochs config.EnableEpochs) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, enableEpochs) + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, enableEpochs, 1) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 0}) @@ -174,12 +198,23 @@ func testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *t } func TestValidatorsSC_ToStakePutInQueueUnStakeAndUnBondShouldRefundUnBondTokens(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( + core.MetachainShardId, + config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: 
stakingV4Step2EnableEpoch, + }, + 1, + ) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) @@ -220,12 +255,23 @@ func TestValidatorsSC_ToStakePutInQueueUnStakeAndUnBondShouldRefundUnBondTokens( } func TestValidatorsSC_ToStakePutInQueueUnStakeNodesAndUnBondNodesShouldRefund(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + if testing.Short() { + t.Skip("this is not a short test") + } + + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( + core.MetachainShardId, + config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + }, + 1, + ) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) @@ -278,22 +324,3 @@ func executeTxAndCheckResults( require.Equal(t, vmCodeExpected, recCode) require.Equal(t, expectedErr, err) } - -func saveNodesConfig(t *testing.T, testContext *vm.VMTestContext, stakedNodes, minNumNodes, maxNumNodes int64) { - protoMarshalizer := &marshal.GogoProtoMarshalizer{} - - account, err := testContext.Accounts.LoadAccount(vmAddr.StakingSCAddress) - require.Nil(t, err) - userAccount, _ := account.(state.UserAccountHandler) - - nodesConfigData := &systemSmartContracts.StakingNodesConfig{ - StakedNodes: stakedNodes, - MinNumNodes: minNumNodes, - MaxNumNodes: maxNumNodes, - } - nodesDataBytes, _ := protoMarshalizer.Marshal(nodesConfigData) - - _ = userAccount.SaveKeyValue([]byte("nodesConfig"), nodesDataBytes) - _ = testContext.Accounts.SaveAccount(account) - _, _ = testContext.Accounts.Commit() -} diff --git a/integrationTests/vm/wasm/badcontracts/badcontracts_test.go b/integrationTests/vm/wasm/badcontracts/badcontracts_test.go index e4b3b1b7ab7..3ccd475e739 100644 --- a/integrationTests/vm/wasm/badcontracts/badcontracts_test.go +++ b/integrationTests/vm/wasm/badcontracts/badcontracts_test.go @@ -1,5 +1,3 @@ -//go:build !race - package badcontracts import ( @@ -11,9 +9,8 @@ import ( ) func Test_Bad_C_NoPanic(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } context := wasm.SetupTestContext(t) @@ -53,6 +50,10 @@ func Test_Bad_C_NoPanic(t *testing.T) { } func Test_Empty_C_NoPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -63,6 +64,10 @@ func Test_Empty_C_NoPanic(t *testing.T) { } func Test_Corrupt_NoPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -73,6 +78,10 @@ func Test_Corrupt_NoPanic(t *testing.T) { } func Test_NoMemoryDeclaration_NoPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -83,6 +92,10 @@ 
func Test_NoMemoryDeclaration_NoPanic(t *testing.T) { } func Test_BadFunctionNames_NoPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -91,6 +104,10 @@ func Test_BadFunctionNames_NoPanic(t *testing.T) { } func Test_BadReservedFunctions(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() diff --git a/integrationTests/vm/wasm/delegation/delegationSimulation_test.go b/integrationTests/vm/wasm/delegation/delegationSimulation_test.go index be67b8d32b1..55be9681586 100644 --- a/integrationTests/vm/wasm/delegation/delegationSimulation_test.go +++ b/integrationTests/vm/wasm/delegation/delegationSimulation_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( diff --git a/integrationTests/vm/wasm/delegation/delegation_test.go b/integrationTests/vm/wasm/delegation/delegation_test.go index 9f4d3501c1c..b921f4cfb0f 100644 --- a/integrationTests/vm/wasm/delegation/delegation_test.go +++ b/integrationTests/vm/wasm/delegation/delegation_test.go @@ -1,5 +1,3 @@ -//go:build !race - package delegation import ( @@ -33,9 +31,8 @@ var NewBalanceBig = wasm.NewBalanceBig var RequireAlmostEquals = wasm.RequireAlmostEquals func TestDelegation_Claims(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } context := wasm.SetupTestContext(t) @@ -86,12 +83,12 @@ func TestDelegation_Claims(t *testing.T) { context.GasLimit = 30000000 err = context.ExecuteSC(&context.Alice, "claimRewards") require.Nil(t, err) - require.Equal(t, 8148760, int(context.LastConsumedFee)) + require.Equal(t, 8577123, int(context.LastConsumedFee)) RequireAlmostEquals(t, NewBalance(600), NewBalanceBig(context.GetAccountBalanceDelta(&context.Alice))) err = context.ExecuteSC(&context.Bob, "claimRewards") require.Nil(t, err) - require.Equal(t, 8059660, int(context.LastConsumedFee)) + require.Equal(t, 8420179, int(context.LastConsumedFee)) RequireAlmostEquals(t, NewBalance(400), NewBalanceBig(context.GetAccountBalanceDelta(&context.Bob))) err = context.ExecuteSC(&context.Carol, "claimRewards") diff --git a/integrationTests/vm/wasm/delegation/testRunner.go b/integrationTests/vm/wasm/delegation/testRunner.go index 343f3dace0f..e7bcb516b45 100644 --- a/integrationTests/vm/wasm/delegation/testRunner.go +++ b/integrationTests/vm/wasm/delegation/testRunner.go @@ -53,8 +53,7 @@ func RunDelegationStressTest( MaxBatchSize: 45000, MaxOpenFiles: 10, } - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(dbConfig) if err != nil { return nil, err } diff --git a/integrationTests/vm/wasm/developerRewards/developerRewards_test.go b/integrationTests/vm/wasm/developerRewards/developerRewards_test.go new file mode 100644 index 00000000000..a23493abc9f --- /dev/null +++ b/integrationTests/vm/wasm/developerRewards/developerRewards_test.go @@ -0,0 +1,78 @@ +package transfers + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" + "github.com/stretchr/testify/require" +) + +func TestClaimDeveloperRewards(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + wasmPath := 
"../testdata/developer-rewards/output/developer_rewards.wasm" + + t.Run("rewards for user", func(t *testing.T) { + context := wasm.SetupTestContext(t) + defer context.Close() + + err := context.DeploySC(wasmPath, "") + require.Nil(t, err) + contractAddress := context.ScAddress + + err = context.ExecuteSC(&context.Owner, "doSomething") + require.Nil(t, err) + + ownerBalanceBefore := context.GetAccountBalance(&context.Owner).Uint64() + rewardBig := context.GetAccount(contractAddress).GetDeveloperReward() + reward := rewardBig.Uint64() + + err = context.ExecuteSC(&context.Owner, "ClaimDeveloperRewards") + require.Nil(t, err) + + ownerBalanceAfter := context.GetAccountBalance(&context.Owner).Uint64() + require.Equal(t, ownerBalanceBefore-context.LastConsumedFee+reward, ownerBalanceAfter) + + events := context.LastLogs[0].GetLogEvents() + require.Equal(t, "ClaimDeveloperRewards", string(events[0].GetIdentifier())) + require.Equal(t, rewardBig.Bytes(), events[0].GetTopics()[0]) + require.Equal(t, context.Owner.Address, events[0].GetTopics()[1]) + }) + + t.Run("rewards for contract", func(t *testing.T) { + context := wasm.SetupTestContext(t) + defer context.Close() + + err := context.DeploySC(wasmPath, "") + require.Nil(t, err) + parentContractAddress := context.ScAddress + + err = context.ExecuteSC(&context.Owner, "deployChild") + require.Nil(t, err) + + chilContractdAddress := context.QuerySCBytes("getChildAddress", [][]byte{}) + require.NotNil(t, chilContractdAddress) + + context.ScAddress = chilContractdAddress + err = context.ExecuteSC(&context.Owner, "doSomething") + require.Nil(t, err) + + contractBalanceBefore := context.GetAccount(parentContractAddress).GetBalance().Uint64() + rewardBig := context.GetAccount(chilContractdAddress).GetDeveloperReward() + reward := rewardBig.Uint64() + + context.ScAddress = parentContractAddress + err = context.ExecuteSC(&context.Owner, "claimDeveloperRewardsOnChild") + require.Nil(t, err) + + contractBalanceAfter := context.GetAccount(parentContractAddress).GetBalance().Uint64() + require.Equal(t, contractBalanceBefore+reward, contractBalanceAfter) + + events := context.LastLogs[0].GetLogEvents() + require.Equal(t, "ClaimDeveloperRewards", string(events[0].GetIdentifier())) + require.Equal(t, rewardBig.Bytes(), events[0].GetTopics()[0]) + require.Equal(t, parentContractAddress, events[0].GetTopics()[1]) + }) +} diff --git a/integrationTests/vm/wasm/erc20/erc20_test.go b/integrationTests/vm/wasm/erc20/erc20_test.go index 7eed879eb50..ef4f45bf02c 100644 --- a/integrationTests/vm/wasm/erc20/erc20_test.go +++ b/integrationTests/vm/wasm/erc20/erc20_test.go @@ -1,5 +1,3 @@ -//go:build !race - package erc20 import ( @@ -10,9 +8,8 @@ import ( ) func Test_C_001(t *testing.T) { - // TODO reinstate test after Wasm VM pointer fix if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } context := wasm.SetupTestContext(t) diff --git a/integrationTests/vm/wasm/queries/queries_test.go b/integrationTests/vm/wasm/queries/queries_test.go new file mode 100644 index 00000000000..e83170e6e0b --- /dev/null +++ b/integrationTests/vm/wasm/queries/queries_test.go @@ -0,0 +1,242 @@ +package queries + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" + 
"github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + + "github.com/stretchr/testify/require" +) + +type now struct { + blockNonce uint64 + stateRootHash []byte +} + +func TestQueries(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + snapshotsOfGetNow := make(map[uint64]now) + snapshotsOfGetState := make(map[uint64]int) + historyOfGetNow := make(map[uint64]now) + historyOfGetState := make(map[uint64]int) + + network := integrationTests.NewMiniNetwork() + defer network.Stop() + + scOwner := network.AddUser(big.NewInt(10000000000000)) + + network.Start() + + // Block 1 + + scAddress := deploy(t, network, scOwner.Address, "../testdata/history/output/history.wasm") + network.Continue(t, 1) + + // Block 2 + + now := queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetNow[1] = now + network.Continue(t, 1) + + // Block 3 + + now = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetNow[2] = now + setState(t, network, scAddress, scOwner.Address, 42) + network.Continue(t, 1) + + // Block 4 + + state := getState(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetState[3] = state + now = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetNow[3] = now + setState(t, network, scAddress, scOwner.Address, 43) + network.Continue(t, 1) + + // Block 4 + + state = getState(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetState[4] = state + now = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetNow[4] = now + network.Continue(t, 1) + + // Check snapshots + block1, _ := network.ShardNode.GetShardHeader(1) + block2, _ := network.ShardNode.GetShardHeader(2) + block3, _ := network.ShardNode.GetShardHeader(3) + + require.Equal(t, uint64(1), snapshotsOfGetNow[1].blockNonce) + require.Equal(t, uint64(2), snapshotsOfGetNow[2].blockNonce) + require.Equal(t, uint64(3), snapshotsOfGetNow[3].blockNonce) + require.Equal(t, uint64(4), snapshotsOfGetNow[4].blockNonce) + + require.Equal(t, block1.GetRootHash(), snapshotsOfGetNow[1].stateRootHash) + require.Equal(t, block1.GetRootHash(), snapshotsOfGetNow[2].stateRootHash) + require.NotEqual(t, block2.GetRootHash(), snapshotsOfGetNow[3].stateRootHash) + require.NotEqual(t, block3.GetRootHash(), snapshotsOfGetNow[4].stateRootHash) + + require.Equal(t, 42, snapshotsOfGetState[3]) + require.Equal(t, 43, snapshotsOfGetState[4]) + + // Check history + historyOfGetState[1] = getState(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 1}) + historyOfGetNow[1] = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 1}) + + historyOfGetState[2] = getState(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 2}) + historyOfGetNow[2] = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 2}) + + historyOfGetState[3] = getState(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 3}) + historyOfGetNow[3] = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 3}) + + historyOfGetState[4] = getState(t, network.ShardNode, scAddress, 
core.OptionalUint64{HasValue: true, Value: 4}) + historyOfGetNow[4] = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 4}) + + require.Equal(t, snapshotsOfGetState[1], historyOfGetState[1]) + require.Equal(t, snapshotsOfGetNow[1].blockNonce, historyOfGetNow[1].blockNonce) + + require.Equal(t, snapshotsOfGetState[2], historyOfGetState[2]) + require.Equal(t, snapshotsOfGetNow[2].blockNonce, historyOfGetNow[2].blockNonce) + + require.Equal(t, snapshotsOfGetState[3], historyOfGetState[3]) + require.Equal(t, snapshotsOfGetNow[3].blockNonce, historyOfGetNow[3].blockNonce) + + require.Equal(t, snapshotsOfGetState[4], historyOfGetState[4]) + require.Equal(t, snapshotsOfGetNow[4].blockNonce, historyOfGetNow[4].blockNonce) +} + +func deploy(t *testing.T, network *integrationTests.MiniNetwork, sender []byte, codePath string) []byte { + code := wasm.GetSCCode(codePath) + data := fmt.Sprintf("%s@%s@0100", code, hex.EncodeToString(factory.WasmVirtualMachine)) + + _, err := network.SendTransaction( + sender, + make([]byte, 32), + big.NewInt(0), + data, + 1000, + ) + require.NoError(t, err) + + scAddress, _ := network.ShardNode.BlockchainHook.NewAddress(sender, 0, factory.WasmVirtualMachine) + return scAddress +} + +func setState(t *testing.T, network *integrationTests.MiniNetwork, scAddress []byte, sender []byte, value uint64) { + data := fmt.Sprintf("setState@%x", value) + + _, err := network.SendTransaction( + sender, + scAddress, + big.NewInt(0), + data, + 1000, + ) + + require.NoError(t, err) +} + +func getState(t *testing.T, node *integrationTests.TestProcessorNode, scAddress []byte, blockNonce core.OptionalUint64) int { + vmOutput, _, err := node.SCQueryService.ExecuteQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: "getState", + Arguments: [][]byte{}, + BlockNonce: blockNonce, + }) + + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + data := vmOutput.ReturnData + + return int(big.NewInt(0).SetBytes(data[0]).Uint64()) +} + +func queryHistoryGetNow(t *testing.T, node *integrationTests.TestProcessorNode, scAddress []byte, blockNonce core.OptionalUint64) now { + vmOutput, _, err := node.SCQueryService.ExecuteQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: "getNow", + Arguments: [][]byte{}, + BlockNonce: blockNonce, + }) + + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + data := vmOutput.ReturnData + + return now{ + blockNonce: big.NewInt(0).SetBytes(data[0]).Uint64(), + stateRootHash: data[1], + } +} + +func TestQueries_Metachain(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + network := integrationTests.NewMiniNetwork() + defer network.Stop() + + network.Start() + + alice := network.AddUser(big.NewInt(10000000000000)) + + // Issue fungible token + issueCost := big.NewInt(1000) + tokenNameHex := hex.EncodeToString([]byte("Test")) + tokenTickerHex := hex.EncodeToString([]byte("TEST")) + txData := fmt.Sprintf("issue@%s@%s@64@00", tokenNameHex, tokenTickerHex) + + _, err := network.SendTransaction( + alice.Address, + vm.ESDTSCAddress, + issueCost, + txData, + core.MinMetaTxExtraGasCost, + ) + + require.NoError(t, err) + network.Continue(t, 5) + + tokens, err := network.MetachainNode.Node.GetAllIssuedESDTs(core.FungibleESDT, context.Background()) + require.NoError(t, err) + require.Len(t, tokens, 1) + + // Query token on older block (should fail) + vmOutput, _, err := network.MetachainNode.SCQueryService.ExecuteQuery(&process.SCQuery{ + ScAddress: 
vm.ESDTSCAddress, + FuncName: "getTokenProperties", + Arguments: [][]byte{[]byte(tokens[0])}, + BlockNonce: core.OptionalUint64{HasValue: true, Value: 2}, + }) + + require.Nil(t, err) + require.Equal(t, vmcommon.UserError, vmOutput.ReturnCode) + require.Equal(t, "no ticker with given name", vmOutput.ReturnMessage) + + // Query token on newer block (should succeed) + vmOutput, _, err = network.MetachainNode.SCQueryService.ExecuteQuery(&process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getTokenProperties", + Arguments: [][]byte{[]byte(tokens[0])}, + BlockNonce: core.OptionalUint64{HasValue: true, Value: 4}, + }) + + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + require.Equal(t, "Test", string(vmOutput.ReturnData[0])) +} diff --git a/integrationTests/vm/wasm/testdata/developer-rewards/developer_rewards.c b/integrationTests/vm/wasm/testdata/developer-rewards/developer_rewards.c new file mode 100644 index 00000000000..a5d3d70d891 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/developer-rewards/developer_rewards.c @@ -0,0 +1,108 @@ +typedef unsigned char byte; +typedef unsigned int i32; +typedef unsigned long long i64; + +void getSCAddress(byte *address); +int storageStore(byte *key, int keyLength, byte *data, int dataLength); +int storageLoad(byte *key, int keyLength, byte *data); +void finish(byte *data, int length); + +int deployFromSourceContract( + long long gas, + byte *value, + byte *sourceContractAddress, + byte *codeMetadata, + byte *newAddress, + int numInitArgs, + byte *initArgLengths, + byte *initArgs); + +i32 createAsyncCall( + byte *destination, + byte *value, + byte *data, + int dataLength, + byte *success, + int successLength, + byte *error, + int errorLength, + long long gas, + long long extraGasForCallback); + +static const i32 ADDRESS_LENGTH = 32; + +byte zero32_red[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +byte zero32_green[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + +byte zeroEGLD[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + +byte codeMetadataUpgradeableReadable[2] = {5, 0}; + +byte emptyArguments[0] = {}; +int emptyArgumentsLengths[0] = {}; +int gasLimitDeploySelf = 20000000; +int gasLimitClaimDeveloperRewards = 6000000; + +byte functionNameClaimDeveloperRewards[] = "ClaimDeveloperRewards"; +byte functionNameDoSomething[] = "doSomething"; +byte storageKeyChildAddress[] = "child"; +byte something[] = "something"; + +void init() +{ +} + +void upgrade() +{ +} + +void doSomething() +{ + finish(something, sizeof(something) - 1); +} + +void deployChild() +{ + byte *selfAddress = zero32_red; + byte *newAddress = zero32_green; + + getSCAddress(selfAddress); + + deployFromSourceContract( + gasLimitDeploySelf, + zeroEGLD, + selfAddress, + codeMetadataUpgradeableReadable, + newAddress, + 0, + (byte *)emptyArgumentsLengths, + emptyArguments); + + storageStore(storageKeyChildAddress, sizeof(storageKeyChildAddress) - 1, newAddress, ADDRESS_LENGTH); +} + +void getChildAddress() +{ + byte *childAddress = zero32_red; + storageLoad(storageKeyChildAddress, sizeof(storageKeyChildAddress) - 1, childAddress); + finish(childAddress, ADDRESS_LENGTH); +} + +void claimDeveloperRewardsOnChild() +{ + byte *childAddress = zero32_red; + storageLoad(storageKeyChildAddress, sizeof(storageKeyChildAddress) - 1, childAddress); + + createAsyncCall( + childAddress, + zeroEGLD, + 
functionNameClaimDeveloperRewards, + sizeof(functionNameClaimDeveloperRewards) - 1, + 0, + 0, + 0, + 0, + gasLimitClaimDeveloperRewards, + 0); +} diff --git a/integrationTests/vm/wasm/testdata/developer-rewards/developer_rewards.export b/integrationTests/vm/wasm/testdata/developer-rewards/developer_rewards.export new file mode 100644 index 00000000000..b71813a510f --- /dev/null +++ b/integrationTests/vm/wasm/testdata/developer-rewards/developer_rewards.export @@ -0,0 +1,6 @@ +init +upgrade +doSomething +deployChild +getChildAddress +claimDeveloperRewardsOnChild diff --git a/integrationTests/vm/wasm/testdata/developer-rewards/output/developer_rewards.wasm b/integrationTests/vm/wasm/testdata/developer-rewards/output/developer_rewards.wasm new file mode 100755 index 00000000000..7d8df93e210 Binary files /dev/null and b/integrationTests/vm/wasm/testdata/developer-rewards/output/developer_rewards.wasm differ diff --git a/integrationTests/vm/wasm/testdata/history/history.c b/integrationTests/vm/wasm/testdata/history/history.c new file mode 100644 index 00000000000..322e216aca8 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/history/history.c @@ -0,0 +1,51 @@ +typedef unsigned char byte; +typedef unsigned int i32; +typedef unsigned long long i64; + +int getArgument(int argumentIndex, byte *argument); +long long int64getArgument(int argumentIndex); +long long getBlockNonce(); +long long getBlockEpoch(); +void getStateRootHash(byte *hash); + +int int64storageStore(byte *key, int keyLength, long long value); +long long int64storageLoad(byte *key, int keyLength); + +void finish(byte *data, int length); +void int64finish(long long value); + +byte zero32_buffer_a[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +byte zero32_buffer_b[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +byte zero32_buffer_c[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +byte storageKey[] = "state"; + +void init() +{ +} + +void upgrade() +{ +} + +void setState() +{ + i64 state = int64getArgument(0); + int64storageStore(storageKey, sizeof(storageKey) - 1, state); +} + +void getState() +{ + i64 state = int64storageLoad(storageKey, sizeof(storageKey) - 1); + int64finish(state); +} + +void getNow() +{ + i64 nonce = getBlockNonce(); + + byte *stateRootHash = zero32_buffer_a; + getStateRootHash(stateRootHash); + + int64finish(nonce); + finish(stateRootHash, 32); +} diff --git a/integrationTests/vm/wasm/testdata/history/history.export b/integrationTests/vm/wasm/testdata/history/history.export new file mode 100644 index 00000000000..b6646aa3aef --- /dev/null +++ b/integrationTests/vm/wasm/testdata/history/history.export @@ -0,0 +1,5 @@ +init +upgrade +getNow +setState +getState diff --git a/integrationTests/vm/wasm/testdata/history/output/history.wasm b/integrationTests/vm/wasm/testdata/history/output/history.wasm new file mode 100755 index 00000000000..5e34d9a0ab0 Binary files /dev/null and b/integrationTests/vm/wasm/testdata/history/output/history.wasm differ diff --git a/integrationTests/vm/wasm/testdata/transferValue/output/transferValue.wasm b/integrationTests/vm/wasm/testdata/transferValue/output/transferValue.wasm new file mode 100755 index 00000000000..cea133a3b2f Binary files /dev/null and b/integrationTests/vm/wasm/testdata/transferValue/output/transferValue.wasm differ diff --git a/integrationTests/vm/wasm/testdata/transferValue/transferValue.c 
b/integrationTests/vm/wasm/testdata/transferValue/transferValue.c new file mode 100644 index 00000000000..e82fc4054d8 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/transferValue/transferValue.c @@ -0,0 +1,58 @@ +typedef unsigned char byte; +typedef unsigned int i32; +typedef unsigned long long i64; + +int getArgument(int argumentIndex, byte *argument); +int transferValueExecute(byte *destination, byte *value, long long gas, byte *function, int functionLength, int numArguments, byte *argumentsLengths, byte *arguments); +void getCaller(byte *callerAddress); +i32 createAsyncCall(byte *destination, byte *value, byte *data, int dataLength, byte *success, int successLength, byte *error, int errorLength, long long gas, long long extraGasForCallback); + +byte zero32_a[32] = {0}; +byte zero32_b[32] = {0}; +byte zero32_c[32] = {0}; + +byte oneAtomOfEGLD[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}; +byte functionNameAskMoney[] = "askMoney"; +byte functionNameMyCallback[] = "myCallback"; + +void init() +{ +} + +void upgrade() +{ +} + +void fund() +{ +} + +void forwardAskMoney() +{ + byte *otherContract = zero32_a; + getArgument(0, otherContract); + + createAsyncCall( + otherContract, + 0, + functionNameAskMoney, + sizeof(functionNameAskMoney) - 1, + functionNameMyCallback, + sizeof(functionNameMyCallback) - 1, + functionNameMyCallback, + sizeof(functionNameMyCallback) - 1, + 15000000, + 0); +} + +void askMoney() +{ + byte *caller = zero32_a; + + getCaller(caller); + transferValueExecute(caller, oneAtomOfEGLD, 0, 0, 0, 0, 0, 0); +} + +void myCallback() +{ +} diff --git a/integrationTests/vm/wasm/testdata/transferValue/transferValue.export b/integrationTests/vm/wasm/testdata/transferValue/transferValue.export new file mode 100644 index 00000000000..c9613a09af3 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/transferValue/transferValue.export @@ -0,0 +1,6 @@ +init +upgrade +fund +forwardAskMoney +askMoney +myCallback diff --git a/integrationTests/vm/wasm/transfers/transfers_test.go b/integrationTests/vm/wasm/transfers/transfers_test.go new file mode 100644 index 00000000000..63e4b120f02 --- /dev/null +++ b/integrationTests/vm/wasm/transfers/transfers_test.go @@ -0,0 +1,64 @@ +package transfers + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" + "github.com/stretchr/testify/require" +) + +func TestTransfers_DuplicatedTransferValueEvents(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + context := wasm.SetupTestContext(t) + defer context.Close() + + err := context.DeploySC("../testdata/transferValue/output/transferValue.wasm", "") + require.Nil(t, err) + vault := context.ScAddress + + err = context.DeploySC("../testdata/transferValue/output/transferValue.wasm", "") + require.Nil(t, err) + forwarder := context.ScAddress + + // Add money to the vault + context.ScAddress = vault + err = context.ExecuteSCWithValue(&context.Owner, "fund", big.NewInt(42)) + require.Nil(t, err) + + // Ask money from the vault, via the forwarder + context.ScAddress = forwarder + err = context.ExecuteSC(&context.Owner, fmt.Sprintf("forwardAskMoney@%s", hex.EncodeToString(vault))) + require.Nil(t, err) + require.Len(t, context.LastLogs, 1) + require.Len(t, context.LastLogs[0].GetLogEvents(), 5) + + events := context.LastLogs[0].GetLogEvents() + + require.Equal(t, "transferValueOnly", string(events[0].GetIdentifier())) 
+ require.Equal(t, "AsyncCall", string(events[0].GetData())) + require.Equal(t, []byte{}, events[0].GetTopics()[0]) + require.Equal(t, forwarder, events[0].GetAddress()) + require.Equal(t, vault, events[0].GetTopics()[1]) + + require.Equal(t, "transferValueOnly", string(events[1].GetIdentifier())) + require.Equal(t, "BackTransfer", string(events[1].GetData())) + require.Equal(t, []byte{0x01}, events[1].GetTopics()[0]) + require.Equal(t, vault, events[1].GetAddress()) + require.Equal(t, forwarder, events[1].GetTopics()[1]) + + // Duplicated "transferValueOnly" events are fixed in #5936. + require.Equal(t, "transferValueOnly", string(events[2].GetIdentifier())) + require.Equal(t, "AsyncCallback", string(events[2].GetData())) + require.Equal(t, []byte{}, events[2].GetTopics()[0]) + require.Equal(t, vault, events[2].GetAddress()) + require.Equal(t, forwarder, events[2].GetTopics()[1]) + + require.Equal(t, "writeLog", string(events[3].GetIdentifier())) + require.Equal(t, "completedTxEvent", string(events[4].GetIdentifier())) +} diff --git a/integrationTests/vm/wasm/upgrades/upgrades_test.go b/integrationTests/vm/wasm/upgrades/upgrades_test.go index c989498c955..c6313d65e73 100644 --- a/integrationTests/vm/wasm/upgrades/upgrades_test.go +++ b/integrationTests/vm/wasm/upgrades/upgrades_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package upgrades import ( @@ -10,9 +6,7 @@ import ( "math/big" "testing" - "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/integrationTests" - "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/factory" @@ -21,6 +15,10 @@ import ( ) func TestUpgrades_Hello(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -45,6 +43,10 @@ func TestUpgrades_Hello(t *testing.T) { } func TestUpgrades_HelloDoesNotUpgradeWhenNotUpgradeable(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -63,6 +65,10 @@ func TestUpgrades_HelloDoesNotUpgradeWhenNotUpgradeable(t *testing.T) { } func TestUpgrades_HelloUpgradesToNotUpgradeable(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -88,6 +94,10 @@ func TestUpgrades_HelloUpgradesToNotUpgradeable(t *testing.T) { } func TestUpgrades_ParentAndChildContracts(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -127,6 +137,10 @@ func TestUpgrades_ParentAndChildContracts(t *testing.T) { } func TestUpgrades_HelloCannotBeUpgradedByNonOwner(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -147,6 +161,10 @@ func TestUpgrades_HelloCannotBeUpgradedByNonOwner(t *testing.T) { } func TestUpgrades_CounterCannotBeUpgradedByNonOwner(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + context := wasm.SetupTestContext(t) defer context.Close() @@ -172,61 +190,56 @@ func TestUpgrades_HelloTrialAndError(t *testing.T) { 
t.Skip("this is not a short test") } - network := integrationTests.NewOneNodeNetwork() + network := integrationTests.NewMiniNetwork() defer network.Stop() - alice := []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - bob := []byte("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") - network.Mint(alice, big.NewInt(10000000000000)) - network.Mint(bob, big.NewInt(10000000000000)) + alice := network.AddUser(big.NewInt(10000000000000)) + bob := network.AddUser(big.NewInt(10000000000000)) - network.GoToRoundOne() + network.Start() deployTxData := fmt.Sprintf("%s@%s@0100", wasm.GetSCCode("../testdata/hello-v1/output/answer.wasm"), hex.EncodeToString(factory.WasmVirtualMachine)) upgradeTxData := fmt.Sprintf("upgradeContract@%s@0100", wasm.GetSCCode("../testdata/hello-v2/output/answer.wasm")) // Deploy the smart contract. Alice is the owner - network.AddTxToPool(&transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: vm.CreateEmptyAddress(), - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(deployTxData), - }) + _, err := network.SendTransaction( + alice.Address, + make([]byte, 32), + big.NewInt(0), + deployTxData, + 100000, + ) + require.Nil(t, err) - scAddress, _ := network.Node.BlockchainHook.NewAddress(alice, 0, factory.WasmVirtualMachine) + scAddress, _ := network.ShardNode.BlockchainHook.NewAddress(alice.Address, 0, factory.WasmVirtualMachine) network.Continue(t, 1) - require.Equal(t, []byte{24}, query(t, network.Node, scAddress, "getUltimateAnswer")) + require.Equal(t, []byte{24}, query(t, network.ShardNode, scAddress, "getUltimateAnswer")) // Upgrade as Bob - upgrade should fail, since Alice is the owner - network.AddTxToPool(&transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: bob, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(upgradeTxData), - }) + _, err = network.SendTransaction( + bob.Address, + scAddress, + big.NewInt(0), + upgradeTxData, + 100000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{24}, query(t, network.Node, scAddress, "getUltimateAnswer")) + require.Equal(t, []byte{24}, query(t, network.ShardNode, scAddress, "getUltimateAnswer")) // Now upgrade as Alice, should work - network.AddTxToPool(&transaction.Transaction{ - Nonce: 1, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(upgradeTxData), - }) + _, err = network.SendTransaction( + alice.Address, + scAddress, + big.NewInt(0), + upgradeTxData, + 100000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{42}, query(t, network.Node, scAddress, "getUltimateAnswer")) + require.Equal(t, []byte{42}, query(t, network.ShardNode, scAddress, "getUltimateAnswer")) } func TestUpgrades_CounterTrialAndError(t *testing.T) { @@ -234,75 +247,69 @@ func TestUpgrades_CounterTrialAndError(t *testing.T) { t.Skip("this is not a short test") } - network := integrationTests.NewOneNodeNetwork() + network := integrationTests.NewMiniNetwork() defer network.Stop() - alice := []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - bob := []byte("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") - network.Mint(alice, big.NewInt(10000000000000)) - network.Mint(bob, big.NewInt(10000000000000)) + alice := network.AddUser(big.NewInt(10000000000000)) + bob := network.AddUser(big.NewInt(10000000000000)) - network.GoToRoundOne() + network.Start() deployTxData := 
fmt.Sprintf("%s@%s@0100", wasm.GetSCCode("../testdata/counter/output/counter.wasm"), hex.EncodeToString(factory.WasmVirtualMachine)) upgradeTxData := fmt.Sprintf("upgradeContract@%s@0100", wasm.GetSCCode("../testdata/counter/output/counter.wasm")) // Deploy the smart contract. Alice is the owner - network.AddTxToPool(&transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: vm.CreateEmptyAddress(), - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(deployTxData), - }) + _, err := network.SendTransaction( + alice.Address, + make([]byte, 32), + big.NewInt(0), + deployTxData, + 100000, + ) + require.Nil(t, err) - scAddress, _ := network.Node.BlockchainHook.NewAddress(alice, 0, factory.WasmVirtualMachine) + scAddress, _ := network.ShardNode.BlockchainHook.NewAddress(alice.Address, 0, factory.WasmVirtualMachine) network.Continue(t, 1) - require.Equal(t, []byte{1}, query(t, network.Node, scAddress, "get")) + require.Equal(t, []byte{1}, query(t, network.ShardNode, scAddress, "get")) // Increment the counter (could be either Bob or Alice) - network.AddTxToPool(&transaction.Transaction{ - Nonce: 1, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte("increment"), - }) + _, err = network.SendTransaction( + alice.Address, + scAddress, + big.NewInt(0), + "increment", + 100000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{2}, query(t, network.Node, scAddress, "get")) + require.Equal(t, []byte{2}, query(t, network.ShardNode, scAddress, "get")) // Upgrade as Bob - upgrade should fail, since Alice is the owner (counter.init() not executed, state not reset) - network.AddTxToPool(&transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: bob, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(upgradeTxData), - }) + _, err = network.SendTransaction( + bob.Address, + scAddress, + big.NewInt(0), + upgradeTxData, + 100000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{2}, query(t, network.Node, scAddress, "get")) + require.Equal(t, []byte{2}, query(t, network.ShardNode, scAddress, "get")) // Now upgrade as Alice, should work (state is reset by counter.init()) - network.AddTxToPool(&transaction.Transaction{ - Nonce: 2, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(upgradeTxData), - }) + _, err = network.SendTransaction( + alice.Address, + scAddress, + big.NewInt(0), + upgradeTxData, + 100000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{1}, query(t, network.Node, scAddress, "get")) + require.Equal(t, []byte{1}, query(t, network.ShardNode, scAddress, "get")) } func query(t *testing.T, node *integrationTests.TestProcessorNode, scAddress []byte, function string) []byte { diff --git a/integrationTests/vm/wasm/utils.go b/integrationTests/vm/wasm/utils.go index e58d3e25c7b..7ec28bb8f45 100644 --- a/integrationTests/vm/wasm/utils.go +++ b/integrationTests/vm/wasm/utils.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" + "github.com/multiversx/mx-chain-core-go/data" 
"github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/rewardTx" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" @@ -52,6 +53,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -102,6 +104,7 @@ type TestContext struct { ScAddress []byte ScCodeMetadata vmcommon.CodeMetadata Accounts *state.AccountsDB + TxLogsProcessor process.TransactionLogProcessor TxProcessor process.TransactionProcessor ScProcessor scrCommon.TestSmartContractProcessor QueryService external.SCQueryService @@ -112,6 +115,7 @@ type TestContext struct { LastTxHash []byte SCRForwarder *mock.IntermediateTransactionHandlerMock LastSCResults []*smartContractResult.SmartContractResult + LastLogs []*data.LogData } type testParticipant struct { @@ -154,7 +158,7 @@ func SetupTestContextWithGasSchedule(t *testing.T, gasSchedule map[string]map[st DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }, context.EpochNotifier) context.RoundNotifier = &epochNotifier.RoundNotifierStub{} - context.EnableRoundsHandler, _ = enablers.NewEnableRoundsHandler(integrationTests.GetDefaultRoundsConfig(), context.RoundNotifier) + context.EnableRoundsHandler, _ = enablers.NewEnableRoundsHandler(testscommon.GetDefaultRoundsConfig(), context.RoundNotifier) context.WasmVMChangeLocker = &sync.RWMutex{} context.initAccounts() @@ -164,7 +168,7 @@ func SetupTestContextWithGasSchedule(t *testing.T, gasSchedule map[string]map[st context.initFeeHandlers() context.initVMAndBlockchainHook() context.initTxProcessorWithOneSCExecutorWithVMs() - context.ScAddress, _ = context.BlockchainHook.NewAddress(context.Owner.Address, context.Owner.Nonce, factory.WasmVirtualMachine) + argsNewSCQueryService := smartContract.ArgsNewSCQueryService{ VmContainer: context.VMContainer, EconomicsFee: context.EconomicsFee, @@ -221,7 +225,7 @@ func (context *TestContext) initFeeHandlers() { RewardsConfigByEpoch: []config.EpochRewardSettings{ { LeaderPercentage: 0.1, - DeveloperPercentage: 0.0, + DeveloperPercentage: 0.3, ProtocolSustainabilityPercentage: 0, ProtocolSustainabilityAddress: testProtocolSustainabilityAddress, TopUpGradientPoint: "1000000", @@ -247,10 +251,9 @@ func (context *TestContext) initFeeHandlers() { MaxGasPriceSetGuardian: "2000000000", }, }, - EpochNotifier: context.EpochNotifier, - EnableEpochsHandler: context.EnableEpochsHandler, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: context.EpochNotifier, + EnableEpochsHandler: context.EnableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -317,6 +320,7 @@ func (context *TestContext) initVMAndBlockchainHook() { WasmVMVersions: []config.WasmVMVersionByEpoch{ {StartEpoch: 0, Version: "*"}, }, + TransferAndExecuteByUserAddresses: 
[]string{"3132333435363738393031323334353637383930313233343536373839303234"}, } esdtTransferParser, _ := parsers.NewESDTTransferParser(marshalizer) @@ -332,6 +336,7 @@ func (context *TestContext) initVMAndBlockchainHook() { WasmVMChangeLocker: context.WasmVMChangeLocker, ESDTTransferParser: esdtTransferParser, Hasher: hasher, + PubKeyConverter: pkConverter, } vmFactory, err := shard.NewVMContainerFactory(argsNewVMFactory) require.Nil(context.T, err) @@ -364,8 +369,11 @@ func (context *TestContext) initTxProcessorWithOneSCExecutorWithVMs() { defaults.FillGasMapInternal(gasSchedule, 1) argsLogProcessor := transactionLog.ArgTxLogProcessor{Marshalizer: marshalizer} - logsProcessor, _ := transactionLog.NewTxLogProcessor(argsLogProcessor) + context.TxLogsProcessor, err = transactionLog.NewTxLogProcessor(argsLogProcessor) + require.Nil(context.T, err) + context.SCRForwarder = &mock.IntermediateTransactionHandlerMock{} + argsNewSCProcessor := scrCommon.ArgsNewSmartContractProcessor{ VmContainer: context.VMContainer, ArgsParser: smartContract.NewArgumentParser(), @@ -384,37 +392,40 @@ func (context *TestContext) initTxProcessorWithOneSCExecutorWithVMs() { GasHandler: &testscommon.GasHandlerStub{ SetGasRefundedCalled: func(gasRefunded uint64, hash []byte) {}, }, - GasSchedule: mock.NewGasScheduleNotifierMock(gasSchedule), - TxLogsProcessor: logsProcessor, - EnableRoundsHandler: context.EnableRoundsHandler, - EnableEpochsHandler: context.EnableEpochsHandler, - WasmVMChangeLocker: context.WasmVMChangeLocker, - VMOutputCacher: txcache.NewDisabledCache(), + GasSchedule: mock.NewGasScheduleNotifierMock(gasSchedule), + TxLogsProcessor: context.TxLogsProcessor, + EnableRoundsHandler: context.EnableRoundsHandler, + EnableEpochsHandler: context.EnableEpochsHandler, + WasmVMChangeLocker: context.WasmVMChangeLocker, + VMOutputCacher: txcache.NewDisabledCache(), + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } - context.ScProcessor, _ = processProxy.NewTestSmartContractProcessorProxy(argsNewSCProcessor, context.EpochNotifier) + context.ScProcessor, err = processProxy.NewTestSmartContractProcessorProxy(argsNewSCProcessor, context.EpochNotifier) require.Nil(context.T, err) argsNewTxProcessor := processTransaction.ArgsNewTxProcessor{ - Accounts: context.Accounts, - Hasher: hasher, - PubkeyConv: pkConverter, - Marshalizer: marshalizer, - SignMarshalizer: marshalizer, - ShardCoordinator: oneShardCoordinator, - ScProcessor: context.ScProcessor, - TxFeeHandler: context.UnsignexTxHandler, - TxTypeHandler: txTypeHandler, - EconomicsFee: context.EconomicsFee, - ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, - BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, - ArgsParser: smartContract.NewArgumentParser(), - ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, - EnableRoundsHandler: context.EnableRoundsHandler, - EnableEpochsHandler: context.EnableEpochsHandler, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, - TxLogsProcessor: logsProcessor, + Accounts: context.Accounts, + Hasher: hasher, + PubkeyConv: pkConverter, + Marshalizer: marshalizer, + SignMarshalizer: marshalizer, + ShardCoordinator: oneShardCoordinator, + ScProcessor: context.ScProcessor, + TxFeeHandler: context.UnsignexTxHandler, + TxTypeHandler: txTypeHandler, + EconomicsFee: context.EconomicsFee, + ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, + BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, + 
ArgsParser: smartContract.NewArgumentParser(), + ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, + EnableRoundsHandler: context.EnableRoundsHandler, + EnableEpochsHandler: context.EnableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + TxLogsProcessor: context.TxLogsProcessor, + RelayedTxV3Processor: &processMocks.RelayedTxV3ProcessorMock{}, + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } context.TxProcessor, err = processTransaction.NewTxProcessor(argsNewTxProcessor) @@ -488,12 +499,18 @@ func (context *TestContext) TakeAccountBalanceSnapshot(participant *testParticip participant.BalanceSnapshot = context.GetAccountBalance(participant) } -// GetAccountBalance - -func (context *TestContext) GetAccountBalance(participant *testParticipant) *big.Int { - account, err := context.Accounts.GetExistingAccount(participant.Address) +// GetAccount - +func (context *TestContext) GetAccount(address []byte) state.UserAccountHandler { + account, err := context.Accounts.GetExistingAccount(address) require.Nil(context.T, err) accountAsUser := account.(state.UserAccountHandler) - return accountAsUser.GetBalance() + return accountAsUser +} + +// GetAccountBalance - +func (context *TestContext) GetAccountBalance(participant *testParticipant) *big.Int { + account := context.GetAccount(participant.Address) + return account.GetBalance() } // GetAccountBalanceDelta - @@ -544,20 +561,20 @@ func (context *TestContext) DeploySC(wasmPath string, parametersString string) e return err } + context.ScAddress, _ = context.BlockchainHook.NewAddress(context.Owner.Address, context.Owner.Nonce, factory.WasmVirtualMachine) + owner.Nonce++ _, err = context.Accounts.Commit() if err != nil { return err } - err = context.GetCompositeTestError() + err = context.acquireOutcome() if err != nil { return err } - _ = context.UpdateLastSCResults() - - return nil + return context.GetCompositeTestError() } // UpgradeSC - @@ -604,14 +621,12 @@ func (context *TestContext) UpgradeSC(wasmPath string, parametersString string) return err } - err = context.GetCompositeTestError() + err = context.acquireOutcome() if err != nil { return err } - _ = context.UpdateLastSCResults() - - return nil + return context.GetCompositeTestError() } // GetSCCode - @@ -680,18 +695,16 @@ func (context *TestContext) ExecuteSCWithValue(sender *testParticipant, txData s return err } - err = context.GetCompositeTestError() + err = context.acquireOutcome() if err != nil { return err } - _ = context.UpdateLastSCResults() - - return nil + return context.GetCompositeTestError() } -// UpdateLastSCResults -- -func (context *TestContext) UpdateLastSCResults() error { +// acquireOutcome - +func (context *TestContext) acquireOutcome() error { transactions := context.SCRForwarder.GetIntermediateTransactions() context.LastSCResults = make([]*smartContractResult.SmartContractResult, len(transactions)) for i, tx := range transactions { @@ -703,6 +716,8 @@ func (context *TestContext) UpdateLastSCResults() error { } } + context.LastLogs = context.TxLogsProcessor.GetAllCurrentLogs() + return nil } diff --git a/integrationTests/vm/wasm/wasmer/wasmer_test.go b/integrationTests/vm/wasm/wasmer/wasmer_test.go index f73bceae6b5..d7eeb9260a4 100644 --- a/integrationTests/vm/wasm/wasmer/wasmer_test.go +++ b/integrationTests/vm/wasm/wasmer/wasmer_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - 
package wasmer import ( @@ -21,6 +17,10 @@ import ( var ownerAddressBytes = []byte("12345678901234567890123456789012") func TestAllowNonFloatingPointSC(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + wasmvm, scAddress := deploy(t, "../testdata/floating_point/non_fp.wasm") defer closeVM(wasmvm) @@ -37,6 +37,10 @@ func TestAllowNonFloatingPointSC(t *testing.T) { } func TestDisallowFloatingPointSC(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + wasmvm, scAddress := deploy(t, "../testdata/floating_point/fp.wasm") defer closeVM(wasmvm) @@ -53,6 +57,10 @@ func TestDisallowFloatingPointSC(t *testing.T) { } func TestSCAbortExecution_DontAbort(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + wasmvm, scAddress := deploy(t, "../testdata/misc/test_abort/test_abort.wasm") defer closeVM(wasmvm) @@ -74,6 +82,10 @@ func TestSCAbortExecution_DontAbort(t *testing.T) { } func TestSCAbortExecution_Abort(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + wasmvm, scAddress := deploy(t, "../testdata/misc/test_abort/test_abort.wasm") defer closeVM(wasmvm) diff --git a/integrationTests/vm/wasm/wasmvm/asyncMockContracts_test.go b/integrationTests/vm/wasm/wasmvm/asyncMockContracts_test.go index 393ef51f5de..f7a3eece169 100644 --- a/integrationTests/vm/wasm/wasmvm/asyncMockContracts_test.go +++ b/integrationTests/vm/wasm/wasmvm/asyncMockContracts_test.go @@ -59,17 +59,22 @@ func TestMockContract_AsyncLegacy_InShard(t *testing.T) { } func TestMockContract_AsyncLegacy_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testMockContract_CrossShard(t, LegacyAsyncCallType) } func TestMockContract_NewAsync_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + testMockContract_CrossShard(t, NewAsyncCallType) } func testMockContract_CrossShard(t *testing.T, asyncCallType []byte) { - if testing.Short() { - t.Skip("this is not a short test") - } transferEGLD := big.NewInt(42) numberOfShards := 2 diff --git a/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go b/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go index a4cfb755b76..a57599d2866 100644 --- a/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go +++ b/integrationTests/vm/wasm/wasmvm/deployment/deploy_test.go @@ -22,7 +22,7 @@ var senderBalance = big.NewInt(1000000000000) func TestScDeployShouldManageCorrectlyTheCodeMetadata(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorAndAccountsWithVMs( diff --git a/integrationTests/vm/wasm/wasmvm/deployment/upgrade_test.go b/integrationTests/vm/wasm/wasmvm/deployment/upgrade_test.go index 6d52f68acf2..22d2fc48a3f 100644 --- a/integrationTests/vm/wasm/wasmvm/deployment/upgrade_test.go +++ b/integrationTests/vm/wasm/wasmvm/deployment/upgrade_test.go @@ -20,7 +20,7 @@ const gasLimit = uint64(10000000) func TestScUpgradeShouldManageCorrectlyTheCodeMetadata(t *testing.T) { if testing.Short() { - t.Skip("cannot run with -race -short; requires Wasm VM fix") + t.Skip("this is not a short test") } testContext, err := vm.CreatePreparedTxProcessorAndAccountsWithVMs( diff --git a/integrationTests/vm/wasm/wasmvm/executeViaBlockchainhook_test.go b/integrationTests/vm/wasm/wasmvm/executeViaBlockchainhook_test.go index e36c4bb744d..9d12746bff5 100644 --- 
a/integrationTests/vm/wasm/wasmvm/executeViaBlockchainhook_test.go +++ b/integrationTests/vm/wasm/wasmvm/executeViaBlockchainhook_test.go @@ -1,5 +1,3 @@ -//go:build !race - package wasmvm import ( @@ -17,6 +15,9 @@ import ( ) func TestExecuteOnDestCtx_BlockchainHook(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } net := integrationTests.NewTestNetworkSized(t, 1, 1, 1) net.Start() diff --git a/integrationTests/vm/wasm/wasmvm/gasSchedule_test.go b/integrationTests/vm/wasm/wasmvm/gasSchedule_test.go index 496a31c0c06..735fbdc2ac3 100644 --- a/integrationTests/vm/wasm/wasmvm/gasSchedule_test.go +++ b/integrationTests/vm/wasm/wasmvm/gasSchedule_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package wasmvm import ( @@ -17,22 +13,37 @@ import ( ) func Benchmark_VmDeployWithFibbonacciAndExecute(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/misc/fib_wasm/output/fib_wasm.wasm", 32, "_main", nil, b.N, nil) } func Benchmark_searchingForPanic(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } for i := 0; i < 10; i++ { runWASMVMBenchmark(b, "../testdata/misc/fib_wasm/output/fib_wasm.wasm", 100, "_main", nil, b.N, nil) } } func Test_searchingForPanic(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + for i := 0; i < 10; i++ { runWASMVMBenchmark(t, "../testdata/misc/fib_wasm/output/fib_wasm.wasm", 100, "_main", nil, 1, nil) } } func Benchmark_VmDeployWithBadContractAndExecute(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig("../../../../cmd/node/config/gasSchedules/gasScheduleV4.toml") result, err := RunTest("../testdata/misc/bad.wasm", 0, "bigLoop", nil, b.N, gasSchedule, 1500000000) @@ -47,6 +58,10 @@ func Benchmark_VmDeployWithBadContractAndExecute(b *testing.B) { } func Benchmark_VmDeployWithBadContractAndExecute2(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig("../../../../cmd/node/config/gasSchedules/gasScheduleV4.toml") arg, _ := hex.DecodeString("012c") @@ -62,100 +77,196 @@ func Benchmark_VmDeployWithBadContractAndExecute2(b *testing.B) { } func Benchmark_VmDeployWithCPUCalculateAndExecute(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/misc/cpucalculate_wasm/output/cpucalculate.wasm", 8000, "cpuCalculate", nil, b.N, nil) } func Benchmark_VmDeployWithStringConcatAndExecute(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/misc/stringconcat_wasm/stringconcat_wasm.wasm", 10000, "_main", nil, b.N, nil) } func Benchmark_TestStore100(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/storage100/output/storage100.wasm", 0, "store100", nil, b.N, nil) } func Benchmark_TestStorageBigIntNew(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntNewTest", nil, b.N, nil) } func Benchmark_TestBigIntGetUnSignedBytes(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, 
"../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntGetUnsignedBytesTest", nil, b.N, nil) } func Benchmark_TestBigIntAdd(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntAddTest", nil, b.N, nil) } func Benchmark_TestBigIntMul(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntMulTest", nil, b.N, nil) } func Benchmark_TestBigIntMul25(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntMul25Test", nil, b.N, nil) } func Benchmark_TestBigIntMul32(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntMul32Test", nil, b.N, nil) } func Benchmark_TestBigIntTDiv(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntTDivTest", nil, b.N, nil) } func Benchmark_TestBigIntTMod(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntTModTest", nil, b.N, nil) } func Benchmark_TestBigIntEDiv(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntEDivTest", nil, b.N, nil) } func Benchmark_TestBigIntEMod(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntEModTest", nil, b.N, nil) } func Benchmark_TestBigIntShr(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntShrTest", nil, b.N, nil) } func Benchmark_TestBigIntSetup(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/bigInt/output/cApiTest.wasm", 0, "bigIntInitSetup", nil, b.N, nil) } func Benchmark_TestCryptoSHA256(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "sha256Test", nil, b.N, nil) } func Benchmark_TestCryptoKeccak256(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "keccak256Test", nil, b.N, nil) } func Benchmark_TestCryptoRipMed160(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "ripemd160Test", nil, b.N, nil) } func Benchmark_TestCryptoBLS(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "verifyBLSTest", nil, b.N, nil) } func Benchmark_TestCryptoVerifyED25519(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, 
"../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "verifyEd25519Test", nil, b.N, nil) } func Benchmark_TestCryptoSecp256k1UnCompressed(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "verifySecp256k1UncompressedKeyTest", nil, b.N, nil) } func Benchmark_TestCryptoSecp256k1Compressed(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "verifySecp256k1CompressedKeyTest", nil, b.N, nil) } func Benchmark_TestEllipticCurveInitialVariablesAndCalls(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "initialVariablesAndCallsTest", nil, b.N, nil) } // elliptic curves func Benchmark_TestEllipticCurve(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + testEllipticCurve(b, "p224Add") testEllipticCurve(b, "p256Add") testEllipticCurve(b, "p384Add") @@ -191,21 +302,37 @@ func Benchmark_TestEllipticCurve(b *testing.B) { } func Benchmark_TestEllipticCurveScalarMultP224(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "p224ScalarMultEcTest", getNumberOfRepsAndScalarLengthArgs(10), b.N, gasSchedule) } func Benchmark_TestEllipticCurveScalarMultP256(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "p256ScalarMultEcTest", getNumberOfRepsAndScalarLengthArgs(10), b.N, gasSchedule) } func Benchmark_TestEllipticCurveScalarMultP384(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "p384ScalarMultEcTest", getNumberOfRepsAndScalarLengthArgs(10), b.N, gasSchedule) } func Benchmark_TestEllipticCurveScalarMultP521(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) runWASMVMBenchmark(b, "../testdata/c-api-tests/ecBenchmark/output/ecBenchmark.wasm", 0, "p521ScalarMultEcTest", getNumberOfRepsAndScalarLengthArgs(10), b.N, gasSchedule) } @@ -216,10 +343,18 @@ func testEllipticCurve(b *testing.B, function string) { } func Benchmark_TestCryptoDoNothing(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + runWASMVMBenchmark(b, "../testdata/c-api-tests/crypto/output/cryptoTest.wasm", 0, "doNothing", nil, b.N, nil) } func Benchmark_TestStorageRust(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) buff := make([]byte, 100) _, _ = rand.Read(buff) @@ -228,6 +363,10 @@ func Benchmark_TestStorageRust(b *testing.B) { } func TestGasModel(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + gasSchedule, _ := 
common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) totalOp := uint64(0) diff --git a/integrationTests/vm/wasm/wasmvm/mockContracts.go b/integrationTests/vm/wasm/wasmvm/mockContracts.go index 21c6e6cae55..4e1b2b2b2c2 100644 --- a/integrationTests/vm/wasm/wasmvm/mockContracts.go +++ b/integrationTests/vm/wasm/wasmvm/mockContracts.go @@ -17,14 +17,15 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" + worldmock "github.com/multiversx/mx-chain-scenario-go/worldmock" "github.com/multiversx/mx-chain-vm-go/executor" contextmock "github.com/multiversx/mx-chain-vm-go/mock/context" - worldmock "github.com/multiversx/mx-chain-vm-go/mock/world" "github.com/multiversx/mx-chain-vm-go/testcommon" "github.com/multiversx/mx-chain-vm-go/vmhost" "github.com/stretchr/testify/require" ) +// MockInitialBalance represents a mock balance var MockInitialBalance = big.NewInt(10_000_000) // WalletAddressPrefix is the prefix of any smart contract address used for testing. @@ -191,6 +192,7 @@ func makeTestAddress(_ []byte, identifier string) []byte { return append(leftBytes, rightBytes...) } +// CreateHostAndInstanceBuilder creates a new host and instance builder func CreateHostAndInstanceBuilder(t *testing.T, net *integrationTests.TestNetwork, vmContainer process.VirtualMachinesContainer, diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverter.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverter.go index 64a8bde201f..36a4fb8e51b 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverter.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverter.go @@ -10,15 +10,15 @@ import ( "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" - mge "github.com/multiversx/mx-chain-scenario-go/scenario-exporter" - mgutil "github.com/multiversx/mx-chain-scenario-go/util" + "github.com/multiversx/mx-chain-scenario-go/scenario/exporter" + scenmodel "github.com/multiversx/mx-chain-scenario-go/scenario/model" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) var errReturnCodeNotOk = errors.New("returnCode is not 0(Ok)") // CreateAccountsFromScenariosAccs uses scenariosAccounts to populate the AccountsAdapter -func CreateAccountsFromScenariosAccs(tc *vm.VMTestContext, scenariosUserAccounts []*mge.TestAccount) error { +func CreateAccountsFromScenariosAccs(tc *vm.VMTestContext, scenariosUserAccounts []*exporter.TestAccount) error { for _, scenariosAcc := range scenariosUserAccounts { acc, err := tc.Accounts.LoadAccount(scenariosAcc.GetAddress()) if err != nil { @@ -60,7 +60,7 @@ func CreateAccountsFromScenariosAccs(tc *vm.VMTestContext, scenariosUserAccounts } // CreateTransactionsFromScenariosTxs converts scenarios transactions intro trasnsactions that can be processed by the txProcessor -func CreateTransactionsFromScenariosTxs(scenariosTxs []*mge.Transaction) (transactions []*transaction.Transaction) { +func CreateTransactionsFromScenariosTxs(scenariosTxs []*exporter.Transaction) 
(transactions []*transaction.Transaction) { var data []byte transactions = make([]*transaction.Transaction, 0) @@ -70,7 +70,7 @@ func CreateTransactionsFromScenariosTxs(scenariosTxs []*mge.Transaction) (transa endpointName := scenariosTx.GetCallFunction() args := scenariosTx.GetCallArguments() if len(esdtTransfers) != 0 { - data = mgutil.CreateMultiTransferData(scenariosTx.GetReceiverAddress(), esdtTransfers, endpointName, args) + data = scenmodel.CreateMultiTransferData(scenariosTx.GetReceiverAddress(), esdtTransfers, endpointName, args) } else { data = createData(endpointName, args) } @@ -92,7 +92,7 @@ func CreateTransactionsFromScenariosTxs(scenariosTxs []*mge.Transaction) (transa } // DeploySCsFromScenariosDeployTxs deploys all smartContracts correspondent to "scDeploy" in a scenarios test, then replaces with the correct computed address in all the transactions. -func DeploySCsFromScenariosDeployTxs(testContext *vm.VMTestContext, deployScenariosTxs []*mge.Transaction) ([][]byte, error) { +func DeploySCsFromScenariosDeployTxs(testContext *vm.VMTestContext, deployScenariosTxs []*exporter.Transaction) ([][]byte, error) { newScAddresses := make([][]byte, 0) for _, deployScenariosTransaction := range deployScenariosTxs { deployedScAddress, err := deploySC(testContext, deployScenariosTransaction) @@ -105,7 +105,7 @@ func DeploySCsFromScenariosDeployTxs(testContext *vm.VMTestContext, deployScenar } // ReplaceScenariosScAddressesWithNewScAddresses corrects the Scenarios SC Addresses, with the new Addresses obtained from deploying the SCs -func ReplaceScenariosScAddressesWithNewScAddresses(deployedScAccounts []*mge.TestAccount, newScAddresses [][]byte, scenariosTxs []*mge.Transaction) { +func ReplaceScenariosScAddressesWithNewScAddresses(deployedScAccounts []*exporter.TestAccount, newScAddresses [][]byte, scenariosTxs []*exporter.Transaction) { for _, newScAddr := range newScAddresses { addressToBeReplaced := deployedScAccounts[0].GetAddress() for _, scenariosTx := range scenariosTxs { @@ -126,7 +126,7 @@ func createData(functionName string, arguments [][]byte) []byte { return builder.ToBytes() } -func deploySC(testContext *vm.VMTestContext, deployScenariosTx *mge.Transaction) (scAddress []byte, err error) { +func deploySC(testContext *vm.VMTestContext, deployScenariosTx *exporter.Transaction) (scAddress []byte, err error) { gasLimit, gasPrice := deployScenariosTx.GetGasLimitAndPrice() ownerAddr := deployScenariosTx.GetSenderAddress() deployData := deployScenariosTx.GetDeployData() diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverterUtils.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverterUtils.go index a701d090e95..ad23085011f 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverterUtils.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverterUtils.go @@ -8,8 +8,8 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/state" logger "github.com/multiversx/mx-chain-logger-go" - mge "github.com/multiversx/mx-chain-scenario-go/scenario-exporter" - mgutil "github.com/multiversx/mx-chain-scenario-go/util" + "github.com/multiversx/mx-chain-scenario-go/scenario/exporter" + scenmodel "github.com/multiversx/mx-chain-scenario-go/scenario/model" vmcommon "github.com/multiversx/mx-chain-vm-common-go" 
"github.com/stretchr/testify/require" ) @@ -17,7 +17,7 @@ import ( var log = logger.GetOrCreate("scenariosConverter") // CheckAccounts will verify if scenariosAccounts correspond to AccountsAdapter accounts -func CheckAccounts(t *testing.T, accAdapter state.AccountsAdapter, scenariosAccounts []*mge.TestAccount) { +func CheckAccounts(t *testing.T, accAdapter state.AccountsAdapter, scenariosAccounts []*exporter.TestAccount) { for _, scenariosAcc := range scenariosAccounts { accHandler, err := accAdapter.LoadAccount(scenariosAcc.GetAddress()) require.Nil(t, err) @@ -56,7 +56,7 @@ func CheckStorage(t *testing.T, dataTrie state.UserAccountHandler, scenariosAccS } // CheckTransactions checks if the transactions correspond with the scenariosTransactions -func CheckTransactions(t *testing.T, transactions []*transaction.Transaction, scenariosTransactions []*mge.Transaction) { +func CheckTransactions(t *testing.T, transactions []*transaction.Transaction, scenariosTransactions []*exporter.Transaction) { expectedLength := len(scenariosTransactions) require.Equal(t, expectedLength, len(transactions)) for i := 0; i < expectedLength; i++ { @@ -77,7 +77,7 @@ func CheckTransactions(t *testing.T, transactions []*transaction.Transaction, sc var expectedData []byte if len(expectedEsdtTransfers) != 0 { - expectedData = mgutil.CreateMultiTransferData(expectedReceiver, expectedEsdtTransfers, expectedCallFunction, expectedCallArguments) + expectedData = scenmodel.CreateMultiTransferData(expectedReceiver, expectedEsdtTransfers, expectedCallFunction, expectedCallArguments) require.Equal(t, expectedSender, transactions[i].GetRcvAddr()) } else { require.Equal(t, expectedReceiver, transactions[i].GetRcvAddr()) @@ -97,7 +97,7 @@ func BenchmarkScenariosSpecificTx(b *testing.B, scenariosTestPath string) { return } defer testContext.Close() - if benchmarkTxPos == mge.InvalidBenchmarkTxPos { + if benchmarkTxPos == exporter.InvalidBenchmarkTxPos { log.Trace("no transactions marked for benchmarking") } if len(transactions) > 1 { @@ -115,21 +115,21 @@ func BenchmarkScenariosSpecificTx(b *testing.B, scenariosTestPath string) { // SetStateFromScenariosTest recieves path to scenariosTest, returns a VMTestContext with the specified accounts, an array with the specified transactions and an error func SetStateFromScenariosTest(scenariosTestPath string) (testContext *vm.VMTestContext, transactions []*transaction.Transaction, bechmarkTxPos int, err error) { - stateAndBenchmarkInfo, err := mge.GetAccountsAndTransactionsFromScenarios(scenariosTestPath) + stateAndBenchmarkInfo, err := exporter.GetAccountsAndTransactionsFromScenarios(scenariosTestPath) if err != nil { - return nil, nil, mge.InvalidBenchmarkTxPos, err + return nil, nil, exporter.InvalidBenchmarkTxPos, err } - testContext, err = vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + testContext, err = vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) if err != nil { - return nil, nil, mge.InvalidBenchmarkTxPos, err + return nil, nil, exporter.InvalidBenchmarkTxPos, err } err = CreateAccountsFromScenariosAccs(testContext, stateAndBenchmarkInfo.Accs) if err != nil { - return nil, nil, mge.InvalidBenchmarkTxPos, err + return nil, nil, exporter.InvalidBenchmarkTxPos, err } newAddresses, err := DeploySCsFromScenariosDeployTxs(testContext, stateAndBenchmarkInfo.DeployTxs) if err != nil { - return nil, nil, mge.InvalidBenchmarkTxPos, err + return nil, nil, exporter.InvalidBenchmarkTxPos, err } 
ReplaceScenariosScAddressesWithNewScAddresses(stateAndBenchmarkInfo.DeployedAccs, newAddresses, stateAndBenchmarkInfo.Txs) transactions = CreateTransactionsFromScenariosTxs(stateAndBenchmarkInfo.Txs) @@ -138,9 +138,9 @@ func SetStateFromScenariosTest(scenariosTestPath string) (testContext *vm.VMTest // CheckConverter - func CheckConverter(t *testing.T, scenariosTestPath string) { - stateAndBenchmarkInfo, err := mge.GetAccountsAndTransactionsFromScenarios(scenariosTestPath) + stateAndBenchmarkInfo, err := exporter.GetAccountsAndTransactionsFromScenarios(scenariosTestPath) require.Nil(t, err) - testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) + testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}, 1) require.Nil(t, err) err = CreateAccountsFromScenariosAccs(testContext, stateAndBenchmarkInfo.Accs) require.Nil(t, err) diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/adder/converterAdder_test.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/adder/converterAdder_test.go index b5d99257277..bf0fc2436fa 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/adder/converterAdder_test.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/adder/converterAdder_test.go @@ -7,9 +7,17 @@ import ( ) func TestScenariosConverter_AdderWithExternalSteps(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + mc.CheckConverter(t, "./adder_with_external_steps.scen.json") } func Benchmark_ScenariosConverter_AdderWithExternalSteps(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + mc.BenchmarkScenariosSpecificTx(b, "./adder_with_external_steps.scen.json") } diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/ecBenchmarks/converterEllipticCurves_test.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/ecBenchmarks/converterEllipticCurves_test.go index 1978b6c0794..1f7b260e707 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/ecBenchmarks/converterEllipticCurves_test.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/ecBenchmarks/converterEllipticCurves_test.go @@ -7,9 +7,17 @@ import ( ) func TestScenariosConverter_EllipticCurves(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + mc.CheckConverter(t, "./elliptic_curves.scen.json") } func Benchmark_ScenariosConverter_EllipticCurves(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + mc.BenchmarkScenariosSpecificTx(b, "./elliptic_curves.scen.json") } diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/mex/converterMex_test.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/mex/converterMex_test.go index bff4906aca6..c1719095a24 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/mex/converterMex_test.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosTests/mex/converterMex_test.go @@ -7,8 +7,16 @@ import ( ) func TestScenariosConverter_MexState(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + mc.CheckConverter(t, "./swap_fixed_input.scen.json") } func Benchmark_ScenariosConverter_SwapFixedInput(b *testing.B) { + if testing.Short() { + b.Skip("this is not a short benchmark") + } + mc.BenchmarkScenariosSpecificTx(b, "./swap_fixed_input.scen.json") } diff --git 
a/integrationTests/vm/wasm/wasmvm/versionswitch/vm_test.go b/integrationTests/vm/wasm/wasmvm/versionswitch/vm_test.go index 45565934c77..2361a44a6f1 100644 --- a/integrationTests/vm/wasm/wasmvm/versionswitch/vm_test.go +++ b/integrationTests/vm/wasm/wasmvm/versionswitch/vm_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package versionswitch import ( @@ -40,6 +36,7 @@ func TestSCExecutionWithVMVersionSwitching(t *testing.T) { {StartEpoch: 15, Version: "v1.2"}, {StartEpoch: 16, Version: "v1.4"}, }, + TransferAndExecuteByUserAddresses: []string{"3132333435363738393031323334353637383930313233343536373839303234"}, } gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) diff --git a/integrationTests/vm/wasm/wasmvm/versionswitch_revert/vm_test.go b/integrationTests/vm/wasm/wasmvm/versionswitch_revert/vm_test.go index dac92a24a75..5004b6cc546 100644 --- a/integrationTests/vm/wasm/wasmvm/versionswitch_revert/vm_test.go +++ b/integrationTests/vm/wasm/wasmvm/versionswitch_revert/vm_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package versionswitch_revert import ( @@ -35,6 +31,7 @@ func TestSCExecutionWithVMVersionSwitchingEpochRevert(t *testing.T) { {StartEpoch: 10, Version: "v1.5"}, {StartEpoch: 11, Version: "v1.4"}, }, + TransferAndExecuteByUserAddresses: []string{"3132333435363738393031323334353637383930313233343536373839303234"}, } gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) diff --git a/integrationTests/vm/wasm/wasmvm/versionswitch_vmquery/vm_test.go b/integrationTests/vm/wasm/wasmvm/versionswitch_vmquery/vm_test.go index 4af3688e4fa..b6357216c4e 100644 --- a/integrationTests/vm/wasm/wasmvm/versionswitch_vmquery/vm_test.go +++ b/integrationTests/vm/wasm/wasmvm/versionswitch_vmquery/vm_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package versionswitch_vmquery import ( @@ -35,6 +31,7 @@ func TestSCExecutionWithVMVersionSwitchingEpochRevertAndVMQueries(t *testing.T) {StartEpoch: 8, Version: "v1.3"}, {StartEpoch: 9, Version: "v1.4"}, }, + TransferAndExecuteByUserAddresses: []string{"3132333435363738393031323334353637383930313233343536373839303234"}, } gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) diff --git a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go index 9df0d4e22b5..9ad6a861235 100644 --- a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go +++ b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go @@ -1,7 +1,3 @@ -//go:build !race - -// TODO remove build condition above to allow -race -short, after Wasm VM fix - package wasmvm import ( @@ -18,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/sha256" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/mock" @@ -34,6 +31,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" 
"github.com/multiversx/mx-chain-go/testscommon/integrationtests" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/multiversx/mx-chain-vm-common-go/builtInFunctions" @@ -46,6 +44,10 @@ import ( var log = logger.GetOrCreate("wasmVMtest") func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(100000000) @@ -92,6 +94,10 @@ func TestVmDeployWithTransferAndGasShouldDeploySCCode(t *testing.T) { } func TestVmSCDeployFactory(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddressBytes := []byte("12345678901234567890123456789012") senderNonce := uint64(0) senderBalance := big.NewInt(100000000) @@ -148,6 +154,10 @@ func TestVmSCDeployFactory(t *testing.T) { } func TestSCMoveBalanceBeforeSCDeployV1(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + ownerAddressBytes := []byte("12345678901234567890123456789012") ownerNonce := uint64(0) ownerBalance := big.NewInt(100000000) @@ -228,6 +238,10 @@ func TestSCMoveBalanceBeforeSCDeployV1(t *testing.T) { } func TestSCMoveBalanceBeforeSCDeploy(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + ownerAddressBytes := []byte("12345678901234567890123456789012") ownerNonce := uint64(0) ownerBalance := big.NewInt(100000000) @@ -307,6 +321,10 @@ func TestSCMoveBalanceBeforeSCDeploy(t *testing.T) { } func TestWASMMetering(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + ownerAddressBytes := []byte("12345678901234567890123456789012") ownerNonce := uint64(11) ownerBalance := big.NewInt(0xfffffffffffffff) @@ -408,6 +426,7 @@ func TestMultipleTimesERC20RustBigIntInBatches(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) durations, err := DeployAndExecuteERC20WithBigInt(3, 1000, gasSchedule, "../testdata/erc20-c-03/rust-simple-erc20.wasm", "transfer") require.Nil(t, err) @@ -446,6 +465,10 @@ func displayBenchmarksResults(durations []time.Duration) { } func TestDeployERC20WithNotEnoughGasShouldReturnOutOfGas(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + gasSchedule, _ := common.LoadGasScheduleConfig(integrationTests.GasSchedulePath) ownerAddressBytes := []byte("12345678901234567890123456789011") ownerNonce := uint64(11) @@ -480,8 +503,7 @@ func TestDeployERC20WithNotEnoughGasShouldReturnOutOfGas(t *testing.T) { } func TestJournalizingAndTimeToProcessChange(t *testing.T) { - // Only a test to benchmark jurnalizing and getting data from trie - t.Skip() + t.Skip("Only a test to benchmark jurnalizing and getting data from trie") numRun := 1000 ownerAddressBytes := []byte("12345678901234567890123456789011") @@ -577,8 +599,7 @@ func TestJournalizingAndTimeToProcessChange(t *testing.T) { } func TestExecuteTransactionAndTimeToProcessChange(t *testing.T) { - // Only a test to benchmark transaction processing - t.Skip() + t.Skip("Only a test to benchmark transaction processing") testMarshalizer := &marshal.JsonMarshalizer{} testHasher := sha256.NewSha256() @@ -609,23 +630,25 @@ func 
TestExecuteTransactionAndTimeToProcessChange(t *testing.T) { _, _ = vm.CreateAccount(accnts, ownerAddressBytes, ownerNonce, ownerBalance) argsNewTxProcessor := processTransaction.ArgsNewTxProcessor{ - Accounts: accnts, - Hasher: testHasher, - PubkeyConv: pubkeyConv, - Marshalizer: testMarshalizer, - SignMarshalizer: testMarshalizer, - ShardCoordinator: shardCoordinator, - ScProcessor: &testscommon.SCProcessorMock{}, - TxFeeHandler: &testscommon.UnsignedTxHandlerStub{}, - TxTypeHandler: txTypeHandler, - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, - BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, - ArgsParser: smartContract.NewArgumentParser(), - ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, - EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - TxLogsProcessor: &mock.TxLogsProcessorStub{}, + Accounts: accnts, + Hasher: testHasher, + PubkeyConv: pubkeyConv, + Marshalizer: testMarshalizer, + SignMarshalizer: testMarshalizer, + ShardCoordinator: shardCoordinator, + ScProcessor: &testscommon.SCProcessorMock{}, + TxFeeHandler: &testscommon.UnsignedTxHandlerStub{}, + TxTypeHandler: txTypeHandler, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, + BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, + ArgsParser: smartContract.NewArgumentParser(), + ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, + EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + TxLogsProcessor: &mock.TxLogsProcessorStub{}, + RelayedTxV3Processor: &processMocks.RelayedTxV3ProcessorMock{}, + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } txProc, _ := processTransaction.NewTxProcessor(argsNewTxProcessor) @@ -805,7 +828,7 @@ func TestAndCatchTrieError(t *testing.T) { log.Info("finished a set - commit and recreate trie", "index", i) if i%10 == 5 { testContext.Accounts.PruneTrie(extraNewRootHash, state.NewRoot, state.NewPruningHandler(state.EnableDataRemoval)) - _ = testContext.Accounts.RecreateTrie(rootHash) + _ = testContext.Accounts.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash)) continue } @@ -817,6 +840,10 @@ func TestAndCatchTrieError(t *testing.T) { } func TestCommunityContract_InShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + zero := big.NewInt(0) transferEGLD := big.NewInt(42) @@ -859,6 +886,10 @@ func TestCommunityContract_InShard(t *testing.T) { } func TestCommunityContract_CrossShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + zero := big.NewInt(0) transferEGLD := big.NewInt(42) @@ -904,6 +935,10 @@ func TestCommunityContract_CrossShard(t *testing.T) { } func TestCommunityContract_CrossShard_TxProcessor(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + // Scenario: // 1. Deploy FunderSC on shard 0, owned by funderOwner // 2. 
Deploy ParentSC on shard 1, owned by parentOwner; deployment needs address of FunderSC @@ -913,11 +948,11 @@ func TestCommunityContract_CrossShard_TxProcessor(t *testing.T) { zero := big.NewInt(0) transferEGLD := big.NewInt(42) - testContextFunderSC, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + testContextFunderSC, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextFunderSC.Close() - testContextParentSC, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + testContextParentSC, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}, 1) require.Nil(t, err) defer testContextParentSC.Close() @@ -1018,6 +1053,10 @@ func TestCommunityContract_CrossShard_TxProcessor(t *testing.T) { } func TestDeployDNSV2SetDeleteUserNames(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + senderAddressBytes, _ := vm.TestAddressPubkeyConverter.Decode(vm.DNSV2DeployerAddress) senderNonce := uint64(0) senderBalance := big.NewInt(100000000) diff --git a/keysManagement/export_test.go b/keysManagement/export_test.go index b9e80ddcc66..42d1ee00317 100644 --- a/keysManagement/export_test.go +++ b/keysManagement/export_test.go @@ -6,6 +6,12 @@ import ( "github.com/multiversx/mx-chain-go/common" ) +// exported constants +const ( + RedundancyReasonForOneKey = redundancyReasonForOneKey + RedundancyReasonForMultipleKeys = redundancyReasonForMultipleKeys +) + // GetRoundsOfInactivity - func (pInfo *peerInfo) GetRoundsOfInactivity() int { pInfo.mutChangeableData.RLock() diff --git a/keysManagement/keysHandler.go b/keysManagement/keysHandler.go index 109b05fc712..1b4b83c2e6f 100644 --- a/keysManagement/keysHandler.go +++ b/keysManagement/keysHandler.go @@ -120,6 +120,11 @@ func (handler *keysHandler) ResetRoundsWithoutReceivedMessages(pkBytes []byte, p handler.managedPeersHolder.ResetRoundsWithoutReceivedMessages(pkBytes, pid) } +// GetRedundancyStepInReason returns the reason if the current node stepped in as a redundancy node +func (handler *keysHandler) GetRedundancyStepInReason() string { + return handler.managedPeersHolder.GetRedundancyStepInReason() +} + // IsInterfaceNil returns true if there is no value under the interface func (handler *keysHandler) IsInterfaceNil() bool { return handler == nil diff --git a/keysManagement/keysHandler_test.go b/keysManagement/keysHandler_test.go index fecfddf3a29..886053a1b94 100644 --- a/keysManagement/keysHandler_test.go +++ b/keysManagement/keysHandler_test.go @@ -268,3 +268,18 @@ func TestKeysHandler_ResetRoundsWithoutReceivedMessages(t *testing.T) { assert.Equal(t, 1, len(mapResetCalled)) assert.Equal(t, 1, mapResetCalled[string(randomPublicKeyBytes)]) } + +func TestKeysHandler_GetRedundancyStepInReason(t *testing.T) { + t.Parallel() + + expectedString := "expected string" + args := createMockArgsKeysHandler() + args.ManagedPeersHolder = &testscommon.ManagedPeersHolderStub{ + GetRedundancyStepInReasonCalled: func() string { + return expectedString + }, + } + + handler, _ := keysManagement.NewKeysHandler(args) + assert.Equal(t, expectedString, handler.GetRedundancyStepInReason()) +} diff --git a/keysManagement/managedPeersHolder.go b/keysManagement/managedPeersHolder.go index 93e48fa2e30..8156b64c8eb 100644 --- a/keysManagement/managedPeersHolder.go +++ b/keysManagement/managedPeersHolder.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "encoding/hex" "fmt" + "sort" "sync" "time" @@ -19,6 
+20,11 @@ import (
 
 var log = logger.GetOrCreate("keysManagement")
 
+const (
+	redundancyReasonForOneKey       = "multikey node stepped in with one key"
+	redundancyReasonForMultipleKeys = "multikey node stepped in with %d keys"
+)
+
 type managedPeersHolder struct {
 	mut                         sync.RWMutex
 	defaultPeerInfoCurrentIndex int
@@ -276,7 +282,7 @@ func (holder *managedPeersHolder) ResetRoundsWithoutReceivedMessages(pkBytes []b
 	pInfo.resetRoundsWithoutReceivedMessages()
 }
 
-// GetManagedKeysByCurrentNode returns all keys that will be managed by this node
+// GetManagedKeysByCurrentNode returns all keys that should act as a validator (main, or a backup that took over) and will be managed by this node
 func (holder *managedPeersHolder) GetManagedKeysByCurrentNode() map[string]crypto.PrivateKey {
 	holder.mut.RLock()
 	defer holder.mut.RUnlock()
@@ -294,6 +300,23 @@ func (holder *managedPeersHolder) GetManagedKeysByCurrentNode() map[string]crypt
 	return allManagedKeys
 }
 
+// GetLoadedKeysByCurrentNode returns all keys that were loaded and will be managed by this node
+func (holder *managedPeersHolder) GetLoadedKeysByCurrentNode() [][]byte {
+	holder.mut.RLock()
+	defer holder.mut.RUnlock()
+
+	allLoadedKeys := make([][]byte, 0, len(holder.data))
+	for pk := range holder.data {
+		allLoadedKeys = append(allLoadedKeys, []byte(pk))
+	}
+
+	sort.Slice(allLoadedKeys, func(i, j int) bool {
+		return string(allLoadedKeys[i]) < string(allLoadedKeys[j])
+	})
+
+	return allLoadedKeys
+}
+
 // IsKeyManagedByCurrentNode returns true if the key is managed by the current node
 func (holder *managedPeersHolder) IsKeyManagedByCurrentNode(pkBytes []byte) bool {
 	pInfo := holder.getPeerInfo(pkBytes)
@@ -369,6 +392,26 @@ func (holder *managedPeersHolder) IsMultiKeyMode() bool {
 	return len(holder.data) > 0
 }
 
+// GetRedundancyStepInReason returns the reason if the current node stepped in as a redundancy node
+// Returns an empty string if the current node is the main multikey machine, if the machine is not running in multikey mode,
+// or if the machine is acting as a backup while the main machine is still active
+func (holder *managedPeersHolder) GetRedundancyStepInReason() string {
+	if holder.isMainMachine {
+		return ""
+	}
+
+	numManagedKeys := len(holder.GetManagedKeysByCurrentNode())
+	if numManagedKeys == 0 {
+		return ""
+	}
+
+	if numManagedKeys == 1 {
+		return redundancyReasonForOneKey
+	}
+
+	return fmt.Sprintf(redundancyReasonForMultipleKeys, numManagedKeys)
+}
+
 // IsInterfaceNil returns true if there is no value under the interface
 func (holder *managedPeersHolder) IsInterfaceNil() bool {
 	return holder == nil
diff --git a/keysManagement/managedPeersHolder_test.go b/keysManagement/managedPeersHolder_test.go
index 7c2d278f9cd..9a8c66fb849 100644
--- a/keysManagement/managedPeersHolder_test.go
+++ b/keysManagement/managedPeersHolder_test.go
@@ -6,6 +6,7 @@ import (
 	"encoding/hex"
 	"errors"
 	"fmt"
+	"runtime"
 	"strings"
 	"sync"
 	"testing"
@@ -13,7 +14,7 @@ import (
 
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-core-go/core/check"
-	"github.com/multiversx/mx-chain-crypto-go"
+	crypto "github.com/multiversx/mx-chain-crypto-go"
 	"github.com/multiversx/mx-chain-go/common"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/keysManagement"
@@ -751,6 +752,24 @@ func TestManagedPeersHolder_GetManagedKeysByCurrentNode(t *testing.T) {
 	})
 }
 
+func 
TestManagedPeersHolder_GetLoadedKeysByCurrentNode(t *testing.T) { + t.Parallel() + + args := createMockArgsManagedPeersHolder() + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes1) + _ = holder.AddManagedPeer(skBytes0) + + for i := 0; i < 10; i++ { + holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) + } + + result := holder.GetLoadedKeysByCurrentNode() + assert.Equal(t, 2, len(result)) + assert.Equal(t, pkBytes0, result[0]) + assert.Equal(t, pkBytes1, result[1]) +} + func TestManagedPeersHolder_IsKeyManagedByCurrentNode(t *testing.T) { t.Parallel() @@ -887,6 +906,10 @@ func TestManagedPeersHolder_IsKeyValidator(t *testing.T) { } func TestManagedPeersHolder_GetNextPeerAuthenticationTime(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("skipping on darwin") + } + t.Parallel() holder, _ := keysManagement.NewManagedPeersHolder(createMockArgsManagedPeersHolder()) @@ -935,6 +958,65 @@ func TestManagedPeersHolder_IsMultiKeyMode(t *testing.T) { }) } +func TestManagedPeersHolder_GetRedundancyStepInReason(t *testing.T) { + t.Parallel() + + t.Run("main machine mode", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + holder, _ := keysManagement.NewManagedPeersHolder(args) + assert.Empty(t, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode but no managed keys", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + assert.Empty(t, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode with one managed key, main active", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes0) + + assert.Empty(t, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode with one managed key, main inactive", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes0) + for i := 0; i < args.MaxRoundsOfInactivity+1; i++ { + holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) + } + + assert.Equal(t, keysManagement.RedundancyReasonForOneKey, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode with 2 managed keys, main active", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes0) + _ = holder.AddManagedPeer(skBytes1) + + assert.Empty(t, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode with 2 managed keys, main inactive", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes0) + _ = holder.AddManagedPeer(skBytes1) + + for i := 0; i < args.MaxRoundsOfInactivity+1; i++ { + holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) + holder.IncrementRoundsWithoutReceivedMessages(pkBytes1) + } + + expectedReason := fmt.Sprintf(keysManagement.RedundancyReasonForMultipleKeys, 2) + assert.Equal(t, expectedReason, holder.GetRedundancyStepInReason()) + }) +} + func TestManagedPeersHolder_ParallelOperationsShouldNotPanic(t *testing.T) { defer func() { r := recover() @@ -984,10 +1066,12 @@ func 
TestManagedPeersHolder_ParallelOperationsShouldNotPanic(t *testing.T) {
 				_, _ = holder.GetNextPeerAuthenticationTime(pkBytes0)
 			case 13:
 				holder.SetNextPeerAuthenticationTime(pkBytes0, time.Now())
+			case 14:
+				_ = holder.GetRedundancyStepInReason()
 			}
 
 			wg.Done()
-		}(i % 14)
+		}(i % 15)
 	}
 
 	wg.Wait()
diff --git a/keysManagement/managedPeersMonitor.go b/keysManagement/managedPeersMonitor.go
index 2c2eef290b4..5f9f117cc2b 100644
--- a/keysManagement/managedPeersMonitor.go
+++ b/keysManagement/managedPeersMonitor.go
@@ -60,7 +60,7 @@ func (monitor *managedPeersMonitor) GetManagedKeysCount() int {
 	return len(monitor.managedPeersHolder.GetManagedKeysByCurrentNode())
 }
 
-// GetManagedKeys returns all keys managed by the current node
+// GetManagedKeys returns all keys that should act as a validator (main, or a backup that took over) and will be managed by this node
 func (monitor *managedPeersMonitor) GetManagedKeys() [][]byte {
 	managedKeysMap := monitor.managedPeersHolder.GetManagedKeysByCurrentNode()
 	managedKeys := make([][]byte, 0, len(managedKeysMap))
@@ -75,6 +75,11 @@ func (monitor *managedPeersMonitor) GetManagedKeys() [][]byte {
 	return managedKeys
 }
 
+// GetLoadedKeys returns all keys that were loaded and will be managed by this node
+func (monitor *managedPeersMonitor) GetLoadedKeys() [][]byte {
+	return monitor.managedPeersHolder.GetLoadedKeysByCurrentNode()
+}
+
 // GetEligibleManagedKeys returns eligible keys that are managed by the current node in the current epoch
 func (monitor *managedPeersMonitor) GetEligibleManagedKeys() ([][]byte, error) {
 	epoch := monitor.epochProvider.CurrentEpoch()
diff --git a/keysManagement/managedPeersMonitor_test.go b/keysManagement/managedPeersMonitor_test.go
index 9ec9dbcd8ad..4be6a5282ca 100644
--- a/keysManagement/managedPeersMonitor_test.go
+++ b/keysManagement/managedPeersMonitor_test.go
@@ -281,3 +281,20 @@ func TestManagedPeersMonitor_GetManagedKeys(t *testing.T) {
 	keys := monitor.GetManagedKeys()
 	require.Equal(t, expectedManagedKeys, keys)
 }
+
+func TestManagedPeersMonitor_GetLoadedKeys(t *testing.T) {
+	t.Parallel()
+
+	loadedKeys := [][]byte{[]byte("pk1"), []byte("pk2"), []byte("pk3")}
+	args := createMockArgManagedPeersMonitor()
+	args.ManagedPeersHolder = &testscommon.ManagedPeersHolderStub{
+		GetLoadedKeysByCurrentNodeCalled: func() [][]byte {
+			return loadedKeys
+		},
+	}
+	monitor, err := NewManagedPeersMonitor(args)
+	require.NoError(t, err)
+
+	keys := monitor.GetLoadedKeys()
+	require.Equal(t, loadedKeys, keys)
+}
diff --git a/node/chainSimulator/chainSimulator.go b/node/chainSimulator/chainSimulator.go
new file mode 100644
index 00000000000..742d040c8c8
--- /dev/null
+++ b/node/chainSimulator/chainSimulator.go
@@ -0,0 +1,720 @@
+package chainSimulator
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"math/big"
+	"sync"
+	"time"
+
+	"github.com/multiversx/mx-chain-go/config"
+	"github.com/multiversx/mx-chain-go/node/chainSimulator/components"
+	"github.com/multiversx/mx-chain-go/node/chainSimulator/configs"
+	"github.com/multiversx/mx-chain-go/node/chainSimulator/dtos"
+	chainSimulatorErrors "github.com/multiversx/mx-chain-go/node/chainSimulator/errors"
+	"github.com/multiversx/mx-chain-go/node/chainSimulator/process"
+	mxChainSharding "github.com/multiversx/mx-chain-go/sharding"
+
+	"github.com/multiversx/mx-chain-core-go/core"
+	
"github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/core/sharding" + "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/transaction" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-crypto-go/signing" + "github.com/multiversx/mx-chain-crypto-go/signing/mcl" + logger "github.com/multiversx/mx-chain-logger-go" +) + +const delaySendTxs = time.Millisecond + +var log = logger.GetOrCreate("chainSimulator") + +type transactionWithResult struct { + hexHash string + tx *transaction.Transaction + result *transaction.ApiTransactionResult +} + +// ArgsChainSimulator holds the arguments needed to create a new instance of simulator +type ArgsChainSimulator struct { + BypassTxSignatureCheck bool + TempDir string + PathToInitialConfig string + NumOfShards uint32 + MinNodesPerShard uint32 + MetaChainMinNodes uint32 + NumNodesWaitingListShard uint32 + NumNodesWaitingListMeta uint32 + GenesisTimestamp int64 + InitialRound int64 + InitialEpoch uint32 + InitialNonce uint64 + RoundDurationInMillis uint64 + RoundsPerEpoch core.OptionalUint64 + ApiInterface components.APIConfigurator + AlterConfigsFunction func(cfg *config.Configs) + VmQueryDelayAfterStartInMs uint64 +} + +// ArgsBaseChainSimulator holds the arguments needed to create a new instance of simulator +type ArgsBaseChainSimulator struct { + ArgsChainSimulator + ConsensusGroupSize uint32 + MetaChainConsensusGroupSize uint32 +} + +type simulator struct { + chanStopNodeProcess chan endProcess.ArgEndProcess + syncedBroadcastNetwork components.SyncedBroadcastNetworkHandler + handlers []ChainHandler + initialWalletKeys *dtos.InitialWalletKeys + initialStakedKeys map[string]*dtos.BLSKey + validatorsPrivateKeys []crypto.PrivateKey + nodes map[uint32]process.NodeHandler + numOfShards uint32 + mutex sync.RWMutex +} + +// NewChainSimulator will create a new instance of simulator +func NewChainSimulator(args ArgsChainSimulator) (*simulator, error) { + return NewBaseChainSimulator(ArgsBaseChainSimulator{ + ArgsChainSimulator: args, + ConsensusGroupSize: configs.ChainSimulatorConsensusGroupSize, + MetaChainConsensusGroupSize: configs.ChainSimulatorConsensusGroupSize, + }) +} + +// NewBaseChainSimulator will create a new instance of simulator +func NewBaseChainSimulator(args ArgsBaseChainSimulator) (*simulator, error) { + instance := &simulator{ + syncedBroadcastNetwork: components.NewSyncedBroadcastNetwork(), + nodes: make(map[uint32]process.NodeHandler), + handlers: make([]ChainHandler, 0, args.NumOfShards+1), + numOfShards: args.NumOfShards, + chanStopNodeProcess: make(chan endProcess.ArgEndProcess), + mutex: sync.RWMutex{}, + initialStakedKeys: make(map[string]*dtos.BLSKey), + } + + err := instance.createChainHandlers(args) + if err != nil { + return nil, err + } + + return instance, nil +} + +func (s *simulator) createChainHandlers(args ArgsBaseChainSimulator) error { + outputConfigs, err := configs.CreateChainSimulatorConfigs(configs.ArgsChainSimulatorConfigs{ + NumOfShards: args.NumOfShards, + OriginalConfigsPath: args.PathToInitialConfig, + GenesisTimeStamp: computeStartTimeBaseOnInitialRound(args.ArgsChainSimulator), + RoundDurationInMillis: args.RoundDurationInMillis, + TempDir: args.TempDir, + MinNodesPerShard: 
args.MinNodesPerShard, + ConsensusGroupSize: args.ConsensusGroupSize, + MetaChainMinNodes: args.MetaChainMinNodes, + MetaChainConsensusGroupSize: args.MetaChainConsensusGroupSize, + RoundsPerEpoch: args.RoundsPerEpoch, + InitialEpoch: args.InitialEpoch, + AlterConfigsFunction: args.AlterConfigsFunction, + NumNodesWaitingListShard: args.NumNodesWaitingListShard, + NumNodesWaitingListMeta: args.NumNodesWaitingListMeta, + }) + if err != nil { + return err + } + + for idx := -1; idx < int(args.NumOfShards); idx++ { + shardIDStr := fmt.Sprintf("%d", idx) + if idx == -1 { + shardIDStr = "metachain" + } + + node, errCreate := s.createTestNode(*outputConfigs, args, shardIDStr) + if errCreate != nil { + return errCreate + } + + chainHandler, errCreate := process.NewBlocksCreator(node) + if errCreate != nil { + return errCreate + } + + shardID := node.GetShardCoordinator().SelfId() + s.nodes[shardID] = node + s.handlers = append(s.handlers, chainHandler) + + if node.GetShardCoordinator().SelfId() == core.MetachainShardId { + currentRootHash, errRootHash := node.GetProcessComponents().ValidatorsStatistics().RootHash() + if errRootHash != nil { + return errRootHash + } + + allValidatorsInfo, errGet := node.GetProcessComponents().ValidatorsStatistics().GetValidatorInfoForRootHash(currentRootHash) + if errGet != nil { + return errGet + } + + err = node.GetProcessComponents().EpochSystemSCProcessor().ProcessSystemSmartContract( + allValidatorsInfo, + node.GetDataComponents().Blockchain().GetGenesisHeader(), + ) + if err != nil { + return err + } + + _, err = node.GetStateComponents().AccountsAdapter().Commit() + if err != nil { + return err + } + } + } + + s.initialWalletKeys = outputConfigs.InitialWallets + s.validatorsPrivateKeys = outputConfigs.ValidatorsPrivateKeys + + log.Info("running the chain simulator with the following parameters", + "number of shards (including meta)", args.NumOfShards+1, + "round per epoch", outputConfigs.Configs.GeneralConfig.EpochStartConfig.RoundsPerEpoch, + "round duration", time.Millisecond*time.Duration(args.RoundDurationInMillis), + "genesis timestamp", args.GenesisTimestamp, + "original config path", args.PathToInitialConfig, + "temporary path", args.TempDir) + + return nil +} + +func computeStartTimeBaseOnInitialRound(args ArgsChainSimulator) int64 { + return args.GenesisTimestamp + int64(args.RoundDurationInMillis/1000)*args.InitialRound +} + +func (s *simulator) createTestNode( + outputConfigs configs.ArgsConfigsSimulator, args ArgsBaseChainSimulator, shardIDStr string, +) (process.NodeHandler, error) { + argsTestOnlyProcessorNode := components.ArgsTestOnlyProcessingNode{ + Configs: outputConfigs.Configs, + ChanStopNodeProcess: s.chanStopNodeProcess, + SyncedBroadcastNetwork: s.syncedBroadcastNetwork, + NumShards: s.numOfShards, + GasScheduleFilename: outputConfigs.GasScheduleFilename, + ShardIDStr: shardIDStr, + APIInterface: args.ApiInterface, + BypassTxSignatureCheck: args.BypassTxSignatureCheck, + InitialRound: args.InitialRound, + InitialNonce: args.InitialNonce, + MinNodesPerShard: args.MinNodesPerShard, + ConsensusGroupSize: args.ConsensusGroupSize, + MinNodesMeta: args.MetaChainMinNodes, + MetaChainConsensusGroupSize: args.MetaChainConsensusGroupSize, + RoundDurationInMillis: args.RoundDurationInMillis, + VmQueryDelayAfterStartInMs: args.VmQueryDelayAfterStartInMs, + } + + return components.NewTestOnlyProcessingNode(argsTestOnlyProcessorNode) +} + +// GenerateBlocks will generate the provided number of blocks +func (s *simulator) GenerateBlocks(numOfBlocks 
int) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + for idx := 0; idx < numOfBlocks; idx++ { + s.incrementRoundOnAllValidators() + err := s.allNodesCreateBlocks() + if err != nil { + return err + } + } + return nil +} + +// GenerateBlocksUntilEpochIsReached will generate blocks until the epoch is reached +func (s *simulator) GenerateBlocksUntilEpochIsReached(targetEpoch int32) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + maxNumberOfRounds := 10000 + for idx := 0; idx < maxNumberOfRounds; idx++ { + s.incrementRoundOnAllValidators() + err := s.allNodesCreateBlocks() + if err != nil { + return err + } + + epochReachedOnAllNodes, err := s.isTargetEpochReached(targetEpoch) + if err != nil { + return err + } + + if epochReachedOnAllNodes { + return nil + } + } + return fmt.Errorf("exceeded rounds to generate blocks") +} + +// ForceResetValidatorStatisticsCache will force the reset of the cache used for the validators statistics endpoint +func (s *simulator) ForceResetValidatorStatisticsCache() error { + metachainNode := s.GetNodeHandler(core.MetachainShardId) + if check.IfNil(metachainNode) { + return errNilMetachainNode + } + + return metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() +} + +func (s *simulator) isTargetEpochReached(targetEpoch int32) (bool, error) { + metachainNode := s.nodes[core.MetachainShardId] + metachainEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() + + for shardID, n := range s.nodes { + if shardID != core.MetachainShardId { + if int32(n.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch()) < int32(metachainEpoch-1) { + return false, fmt.Errorf("shard %d is with at least 2 epochs behind metachain shard node epoch %d, metachain node epoch %d", + shardID, n.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch(), metachainEpoch) + } + } + + if int32(n.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch()) < targetEpoch { + return false, nil + } + } + + return true, nil +} + +func (s *simulator) incrementRoundOnAllValidators() { + for _, node := range s.handlers { + node.IncrementRound() + } +} + +// ForceChangeOfEpoch will force the change of current epoch +// This method will call the epoch change trigger and generate block till a new epoch is reached +func (s *simulator) ForceChangeOfEpoch() error { + log.Info("force change of epoch") + for shardID, node := range s.nodes { + err := node.ForceChangeOfEpoch() + if err != nil { + return fmt.Errorf("force change of epoch shardID-%d: error-%w", shardID, err) + } + } + + epoch := s.nodes[core.MetachainShardId].GetProcessComponents().EpochStartTrigger().Epoch() + + return s.GenerateBlocksUntilEpochIsReached(int32(epoch + 1)) +} + +func (s *simulator) allNodesCreateBlocks() error { + for _, node := range s.handlers { + // TODO MX-15150 remove this when we remove all goroutines + time.Sleep(2 * time.Millisecond) + + err := node.CreateNewBlock() + if err != nil { + return err + } + } + + return nil +} + +// GetNodeHandler returns the node handler from the provided shardID +func (s *simulator) GetNodeHandler(shardID uint32) process.NodeHandler { + s.mutex.RLock() + defer s.mutex.RUnlock() + + return s.nodes[shardID] +} + +// GetRestAPIInterfaces will return a map with the rest api interfaces for every node +func (s *simulator) GetRestAPIInterfaces() map[uint32]string { + s.mutex.Lock() + defer s.mutex.Unlock() + + resMap := make(map[uint32]string) + for shardID, node := range s.nodes { + resMap[shardID] = 
node.GetFacadeHandler().RestApiInterface() + } + + return resMap +} + +// GetInitialWalletKeys will return the initial wallet keys +func (s *simulator) GetInitialWalletKeys() *dtos.InitialWalletKeys { + return s.initialWalletKeys +} + +// AddValidatorKeys will add the provided validators private keys in the keys handler on all nodes +func (s *simulator) AddValidatorKeys(validatorsPrivateKeys [][]byte) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + for _, node := range s.nodes { + err := s.setValidatorKeysForNode(node, validatorsPrivateKeys) + if err != nil { + return err + } + } + + return nil +} + +// GenerateAndMintWalletAddress will generate an address in the provided shard and will mint that address with the provided value +// if the target shard ID value does not correspond to a node handled by the chain simulator, the address will be generated in a random shard ID +func (s *simulator) GenerateAndMintWalletAddress(targetShardID uint32, value *big.Int) (dtos.WalletAddress, error) { + addressConverter := s.nodes[core.MetachainShardId].GetCoreComponents().AddressPubKeyConverter() + nodeHandler := s.GetNodeHandler(targetShardID) + var buff []byte + if check.IfNil(nodeHandler) { + buff = generateAddress(addressConverter.Len()) + } else { + buff = generateAddressInShard(nodeHandler.GetShardCoordinator(), addressConverter.Len()) + } + + address, err := addressConverter.Encode(buff) + if err != nil { + return dtos.WalletAddress{}, err + } + + err = s.SetStateMultiple([]*dtos.AddressState{ + { + Address: address, + Balance: value.String(), + }, + }) + + return dtos.WalletAddress{ + Bech32: address, + Bytes: buff, + }, err +} + +func generateAddressInShard(shardCoordinator mxChainSharding.Coordinator, len int) []byte { + for { + buff := generateAddress(len) + shardID := shardCoordinator.ComputeId(buff) + if shardID == shardCoordinator.SelfId() { + return buff + } + } +} + +func generateAddress(len int) []byte { + buff := make([]byte, len) + _, _ = rand.Read(buff) + + return buff +} + +func (s *simulator) setValidatorKeysForNode(node process.NodeHandler, validatorsPrivateKeys [][]byte) error { + for idx, privateKey := range validatorsPrivateKeys { + + err := node.GetCryptoComponents().ManagedPeersHolder().AddManagedPeer(privateKey) + if err != nil { + return fmt.Errorf("cannot add private key for shard=%d, index=%d, error=%s", node.GetShardCoordinator().SelfId(), idx, err.Error()) + } + } + + return nil +} + +// GetValidatorPrivateKeys will return the initial validators private keys +func (s *simulator) GetValidatorPrivateKeys() []crypto.PrivateKey { + s.mutex.Lock() + defer s.mutex.Unlock() + + return s.validatorsPrivateKeys +} + +// SetKeyValueForAddress will set the provided state for a given address +func (s *simulator) SetKeyValueForAddress(address string, keyValueMap map[string]string) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + addressConverter := s.nodes[core.MetachainShardId].GetCoreComponents().AddressPubKeyConverter() + addressBytes, err := addressConverter.Decode(address) + if err != nil { + return err + } + + if bytes.Equal(addressBytes, core.SystemAccountAddress) { + return s.setKeyValueSystemAccount(keyValueMap) + } + + shardID := sharding.ComputeShardID(addressBytes, s.numOfShards) + testNode, ok := s.nodes[shardID] + if !ok { + return fmt.Errorf("cannot find a test node for the computed shard id, computed shard id: %d", shardID) + } + + return testNode.SetKeyValueForAddress(addressBytes, keyValueMap) +} + +func (s *simulator) setKeyValueSystemAccount(keyValueMap 
map[string]string) error { + for shard, node := range s.nodes { + err := node.SetKeyValueForAddress(core.SystemAccountAddress, keyValueMap) + if err != nil { + return fmt.Errorf("%w for shard %d", err, shard) + } + } + + return nil +} + +// SetStateMultiple will set state for multiple addresses +func (s *simulator) SetStateMultiple(stateSlice []*dtos.AddressState) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + addressConverter := s.nodes[core.MetachainShardId].GetCoreComponents().AddressPubKeyConverter() + for _, stateValue := range stateSlice { + addressBytes, err := addressConverter.Decode(stateValue.Address) + if err != nil { + return err + } + + if bytes.Equal(addressBytes, core.SystemAccountAddress) { + err = s.setStateSystemAccount(stateValue) + } else { + shardID := sharding.ComputeShardID(addressBytes, s.numOfShards) + err = s.nodes[shardID].SetStateForAddress(addressBytes, stateValue) + } + if err != nil { + return err + } + } + + return nil +} + +// RemoveAccounts will try to remove all accounts data for the addresses provided +func (s *simulator) RemoveAccounts(addresses []string) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + addressConverter := s.nodes[core.MetachainShardId].GetCoreComponents().AddressPubKeyConverter() + for _, address := range addresses { + addressBytes, err := addressConverter.Decode(address) + if err != nil { + return err + } + + if bytes.Equal(addressBytes, core.SystemAccountAddress) { + err = s.removeAllSystemAccounts() + } else { + shardID := sharding.ComputeShardID(addressBytes, s.numOfShards) + err = s.nodes[shardID].RemoveAccount(addressBytes) + } + if err != nil { + return err + } + } + + return nil +} + +// SendTxAndGenerateBlockTilTxIsExecuted will send the provided transaction and generate block until the transaction is executed +func (s *simulator) SendTxAndGenerateBlockTilTxIsExecuted(txToSend *transaction.Transaction, maxNumOfBlocksToGenerateWhenExecutingTx int) (*transaction.ApiTransactionResult, error) { + result, err := s.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txToSend}, maxNumOfBlocksToGenerateWhenExecutingTx) + if err != nil { + return nil, err + } + + return result[0], nil +} + +// SendTxsAndGenerateBlocksTilAreExecuted will send the provided transactions and generate block until all transactions are executed +func (s *simulator) SendTxsAndGenerateBlocksTilAreExecuted(txsToSend []*transaction.Transaction, maxNumOfBlocksToGenerateWhenExecutingTx int) ([]*transaction.ApiTransactionResult, error) { + if len(txsToSend) == 0 { + return nil, chainSimulatorErrors.ErrEmptySliceOfTxs + } + if maxNumOfBlocksToGenerateWhenExecutingTx == 0 { + return nil, chainSimulatorErrors.ErrInvalidMaxNumOfBlocks + } + + transactionStatus := make([]*transactionWithResult, 0, len(txsToSend)) + for idx, tx := range txsToSend { + if tx == nil { + return nil, fmt.Errorf("%w on position %d", chainSimulatorErrors.ErrNilTransaction, idx) + } + + txHashHex, err := s.sendTx(tx) + if err != nil { + return nil, err + } + + transactionStatus = append(transactionStatus, &transactionWithResult{ + hexHash: txHashHex, + tx: tx, + }) + } + + time.Sleep(delaySendTxs) + + for count := 0; count < maxNumOfBlocksToGenerateWhenExecutingTx; count++ { + err := s.GenerateBlocks(1) + if err != nil { + return nil, err + } + + txsAreExecuted := s.computeTransactionsStatus(transactionStatus) + if txsAreExecuted { + return getApiTransactionsFromResult(transactionStatus), nil + } + } + + return nil, errors.New("something went wrong. 
Transaction(s) is/are still in pending") +} + +func (s *simulator) computeTransactionsStatus(txsWithResult []*transactionWithResult) bool { + allAreExecuted := true + contractDeploySCAddress := make([]byte, s.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Len()) + for _, resultTx := range txsWithResult { + if resultTx.result != nil { + continue + } + + sentTx := resultTx.tx + destinationShardID := s.GetNodeHandler(0).GetShardCoordinator().ComputeId(sentTx.RcvAddr) + if bytes.Equal(sentTx.RcvAddr, contractDeploySCAddress) { + destinationShardID = s.GetNodeHandler(0).GetShardCoordinator().ComputeId(sentTx.SndAddr) + } + + result, errGet := s.GetNodeHandler(destinationShardID).GetFacadeHandler().GetTransaction(resultTx.hexHash, true) + if errGet == nil && result.Status != transaction.TxStatusPending { + log.Info("############## transaction was executed ##############", "txHash", resultTx.hexHash) + resultTx.result = result + continue + } + + allAreExecuted = false + } + + return allAreExecuted +} + +func getApiTransactionsFromResult(txWithResult []*transactionWithResult) []*transaction.ApiTransactionResult { + result := make([]*transaction.ApiTransactionResult, 0, len(txWithResult)) + for _, tx := range txWithResult { + result = append(result, tx.result) + } + + return result +} + +func (s *simulator) sendTx(tx *transaction.Transaction) (string, error) { + shardID := s.GetNodeHandler(0).GetShardCoordinator().ComputeId(tx.SndAddr) + err := s.GetNodeHandler(shardID).GetFacadeHandler().ValidateTransaction(tx) + if err != nil { + return "", err + } + + node := s.GetNodeHandler(shardID) + txHash, err := core.CalculateHash(node.GetCoreComponents().InternalMarshalizer(), node.GetCoreComponents().Hasher(), tx) + if err != nil { + return "", err + } + + txHashHex := hex.EncodeToString(txHash) + _, err = node.GetFacadeHandler().SendBulkTransactions([]*transaction.Transaction{tx}) + if err != nil { + return "", err + } + + for { + recoveredTx, _ := node.GetFacadeHandler().GetTransaction(txHashHex, false) + if recoveredTx != nil { + log.Info("############## send transaction ##############", "txHash", txHashHex) + return txHashHex, nil + } + + time.Sleep(delaySendTxs) + } +} + +func (s *simulator) setStateSystemAccount(state *dtos.AddressState) error { + for shard, node := range s.nodes { + err := node.SetStateForAddress(core.SystemAccountAddress, state) + if err != nil { + return fmt.Errorf("%w for shard %d", err, shard) + } + } + + return nil +} + +func (s *simulator) removeAllSystemAccounts() error { + for shard, node := range s.nodes { + err := node.RemoveAccount(core.SystemAccountAddress) + if err != nil { + return fmt.Errorf("%w for shard %d", err, shard) + } + } + + return nil +} + +// GetAccount will fetch the account of the provided address +func (s *simulator) GetAccount(address dtos.WalletAddress) (api.AccountResponse, error) { + destinationShardID := s.GetNodeHandler(0).GetShardCoordinator().ComputeId(address.Bytes) + + account, _, err := s.GetNodeHandler(destinationShardID).GetFacadeHandler().GetAccount(address.Bech32, api.AccountQueryOptions{}) + return account, err +} + +// Close will stop and close the simulator +func (s *simulator) Close() { + s.mutex.Lock() + defer s.mutex.Unlock() + + var errorStrings []string + for _, n := range s.nodes { + err := n.Close() + if err != nil { + errorStrings = append(errorStrings, err.Error()) + } + } + + if len(errorStrings) != 0 { + log.Error("error closing chain simulator", "error", components.AggregateErrors(errorStrings, 
components.ErrClose)) + } +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *simulator) IsInterfaceNil() bool { + return s == nil +} + +// GenerateBlsPrivateKeys will generate bls keys +func GenerateBlsPrivateKeys(numOfKeys int) ([][]byte, []string, error) { + blockSigningGenerator := signing.NewKeyGenerator(mcl.NewSuiteBLS12()) + + secretKeysBytes := make([][]byte, 0, numOfKeys) + blsKeysHex := make([]string, 0, numOfKeys) + for idx := 0; idx < numOfKeys; idx++ { + secretKey, publicKey := blockSigningGenerator.GeneratePair() + + secretKeyBytes, err := secretKey.ToByteArray() + if err != nil { + return nil, nil, err + } + + secretKeysBytes = append(secretKeysBytes, secretKeyBytes) + + publicKeyBytes, err := publicKey.ToByteArray() + if err != nil { + return nil, nil, err + } + + blsKeysHex = append(blsKeysHex, hex.EncodeToString(publicKeyBytes)) + } + + return secretKeysBytes, blsKeysHex, nil +} diff --git a/node/chainSimulator/chainSimulator_test.go b/node/chainSimulator/chainSimulator_test.go new file mode 100644 index 00000000000..18f54ccbfe9 --- /dev/null +++ b/node/chainSimulator/chainSimulator_test.go @@ -0,0 +1,432 @@ +package chainSimulator + +import ( + "github.com/multiversx/mx-chain-go/errors" + "math/big" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + chainSimulatorCommon "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + defaultPathToInitialConfig = "../../cmd/node/config/" +) + +func TestNewChainSimulator(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: core.OptionalUint64{}, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + time.Sleep(time.Second) + + chainSimulator.Close() +} + +func TestChainSimulator_GenerateBlocksShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: core.OptionalUint64{ + HasValue: true, + Value: 20, + }, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + InitialRound: 200000000, + InitialEpoch: 100, + InitialNonce: 100, + AlterConfigsFunction: func(cfg *config.Configs) { + // we need to 
enable this as this test skips a lot of epoch activations events, and it will fail otherwise + // because the owner of a BLS key coming from genesis is not set + // (the owner is not set at genesis anymore because we do not enable the staking v2 in that phase) + cfg.EpochConfig.EnableEpochs.StakingV2EnableEpoch = 0 + }, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + time.Sleep(time.Second) + + err = chainSimulator.GenerateBlocks(50) + require.Nil(t, err) +} + +func TestChainSimulator_GenerateBlocksAndEpochChangeShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 100, + MetaChainMinNodes: 100, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + facade, err := NewChainSimulatorFacade(chainSimulator) + require.Nil(t, err) + + genesisBalances := make(map[string]*big.Int) + for _, stakeWallet := range chainSimulator.initialWalletKeys.StakeWallets { + initialAccount, errGet := facade.GetExistingAccountFromBech32AddressString(stakeWallet.Address.Bech32) + require.Nil(t, errGet) + + genesisBalances[stakeWallet.Address.Bech32] = initialAccount.GetBalance() + } + + time.Sleep(time.Second) + + err = chainSimulator.GenerateBlocks(80) + require.Nil(t, err) + + numAccountsWithIncreasedBalances := 0 + for _, stakeWallet := range chainSimulator.initialWalletKeys.StakeWallets { + account, errGet := facade.GetExistingAccountFromBech32AddressString(stakeWallet.Address.Bech32) + require.Nil(t, errGet) + + if account.GetBalance().Cmp(genesisBalances[stakeWallet.Address.Bech32]) > 0 { + numAccountsWithIncreasedBalances++ + } + } + + assert.True(t, numAccountsWithIncreasedBalances > 0) +} + +func TestSimulator_TriggerChangeOfEpoch(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 15000, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 100, + MetaChainMinNodes: 100, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + err = chainSimulator.ForceChangeOfEpoch() + require.Nil(t, err) + + err = chainSimulator.ForceChangeOfEpoch() + require.Nil(t, err) + + err = chainSimulator.ForceChangeOfEpoch() + require.Nil(t, err) + + err = chainSimulator.ForceChangeOfEpoch() + require.Nil(t, err) + + metaNode := chainSimulator.GetNodeHandler(core.MetachainShardId) + currentEpoch := metaNode.GetProcessComponents().EpochStartTrigger().Epoch() + require.Equal(t, uint32(4), currentEpoch) +} + +func TestChainSimulator_SetState(t *testing.T) { + if testing.Short() { + 
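+		// the chain simulator starts a full set of in-process test nodes (one per shard plus metachain), so this test only runs without the -short flag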
t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + chainSimulatorCommon.CheckSetState(t, chainSimulator, chainSimulator.GetNodeHandler(0)) +} + +func TestChainSimulator_SetEntireState(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + balance := "431271308732096033771131" + contractAddress := "erd1qqqqqqqqqqqqqpgqmzzm05jeav6d5qvna0q2pmcllelkz8xddz3syjszx5" + accountState := &dtos.AddressState{ + Address: contractAddress, + Nonce: new(uint64), + Balance: balance, + Code: "0061736d010000000129086000006000017f60027f7f017f60027f7f0060017f0060037f7f7f017f60037f7f7f0060017f017f0290020b03656e7619626967496e74476574556e7369676e6564417267756d656e74000303656e760f6765744e756d417267756d656e7473000103656e760b7369676e616c4572726f72000303656e76126d42756666657253746f726167654c6f6164000203656e76176d427566666572546f426967496e74556e7369676e6564000203656e76196d42756666657246726f6d426967496e74556e7369676e6564000203656e76136d42756666657253746f7261676553746f7265000203656e760f6d4275666665725365744279746573000503656e760e636865636b4e6f5061796d656e74000003656e7614626967496e7446696e697368556e7369676e6564000403656e7609626967496e744164640006030b0a010104070301000000000503010003060f027f0041a080080b7f0041a080080b074607066d656d6f7279020004696e697400110667657453756d00120361646400130863616c6c4261636b00140a5f5f646174615f656e6403000b5f5f686561705f6261736503010aca010a0e01017f4100100c2200100020000b1901017f419c8008419c800828020041016b220036020020000b1400100120004604400f0b4180800841191002000b16002000100c220010031a2000100c220010041a20000b1401017f100c2202200110051a2000200210061a0b1301017f100c220041998008410310071a20000b1401017f10084101100d100b210010102000100f0b0e0010084100100d1010100e10090b2201037f10084101100d100b210110102202100e220020002001100a20022000100f0b0300010b0b2f0200418080080b1c77726f6e67206e756d626572206f6620617267756d656e747373756d00419c80080b049cffffff", + CodeHash: "n9EviPlHS6EV+3Xp0YqP28T0IUfeAFRFBIRC1Jw6pyU=", + RootHash: "76cr5Jhn6HmBcDUMIzikEpqFgZxIrOzgNkTHNatXzC4=", + CodeMetadata: "BQY=", + Owner: "erd1ss6u80ruas2phpmr82r42xnkd6rxy40g9jl69frppl4qez9w2jpsqj8x97", + DeveloperRewards: "5401004999998", + Pairs: map[string]string{ + "73756d": "0a", + }, + } + + chainSimulatorCommon.CheckSetEntireState(t, chainSimulator, 
chainSimulator.GetNodeHandler(1), accountState) +} + +func TestChainSimulator_SetEntireStateWithRemoval(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + balance := "431271308732096033771131" + contractAddress := "erd1qqqqqqqqqqqqqpgqmzzm05jeav6d5qvna0q2pmcllelkz8xddz3syjszx5" + accountState := &dtos.AddressState{ + Address: contractAddress, + Nonce: new(uint64), + Balance: balance, + Code: "0061736d010000000129086000006000017f60027f7f017f60027f7f0060017f0060037f7f7f017f60037f7f7f0060017f017f0290020b03656e7619626967496e74476574556e7369676e6564417267756d656e74000303656e760f6765744e756d417267756d656e7473000103656e760b7369676e616c4572726f72000303656e76126d42756666657253746f726167654c6f6164000203656e76176d427566666572546f426967496e74556e7369676e6564000203656e76196d42756666657246726f6d426967496e74556e7369676e6564000203656e76136d42756666657253746f7261676553746f7265000203656e760f6d4275666665725365744279746573000503656e760e636865636b4e6f5061796d656e74000003656e7614626967496e7446696e697368556e7369676e6564000403656e7609626967496e744164640006030b0a010104070301000000000503010003060f027f0041a080080b7f0041a080080b074607066d656d6f7279020004696e697400110667657453756d00120361646400130863616c6c4261636b00140a5f5f646174615f656e6403000b5f5f686561705f6261736503010aca010a0e01017f4100100c2200100020000b1901017f419c8008419c800828020041016b220036020020000b1400100120004604400f0b4180800841191002000b16002000100c220010031a2000100c220010041a20000b1401017f100c2202200110051a2000200210061a0b1301017f100c220041998008410310071a20000b1401017f10084101100d100b210010102000100f0b0e0010084100100d1010100e10090b2201037f10084101100d100b210110102202100e220020002001100a20022000100f0b0300010b0b2f0200418080080b1c77726f6e67206e756d626572206f6620617267756d656e747373756d00419c80080b049cffffff", + CodeHash: "n9EviPlHS6EV+3Xp0YqP28T0IUfeAFRFBIRC1Jw6pyU=", + RootHash: "eqIumOaMn7G5cNSViK3XHZIW/C392ehfHxOZkHGp+Gc=", // root hash with auto balancing enabled + CodeMetadata: "BQY=", + Owner: "erd1ss6u80ruas2phpmr82r42xnkd6rxy40g9jl69frppl4qez9w2jpsqj8x97", + DeveloperRewards: "5401004999998", + Pairs: map[string]string{ + "73756d": "0a", + }, + } + chainSimulatorCommon.CheckSetEntireStateWithRemoval(t, chainSimulator, chainSimulator.GetNodeHandler(1), accountState) +} + +func TestChainSimulator_GetAccount(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + 
MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + // the facade's GetAccount method requires that at least one block was produced over the genesis block + _ = chainSimulator.GenerateBlocks(1) + + defer chainSimulator.Close() + + chainSimulatorCommon.CheckGetAccount(t, chainSimulator) +} + +func TestSimulator_SendTransactions(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + chainSimulatorCommon.CheckGenerateTransactions(t, chainSimulator) +} + +func TestSimulator_SentMoveBalanceNoGasForFee(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: true, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + wallet0, err := chainSimulator.GenerateAndMintWalletAddress(0, big.NewInt(0)) + require.Nil(t, err) + + ftx := &transaction.Transaction{ + Nonce: 0, + Value: big.NewInt(0), + SndAddr: wallet0.Bytes, + RcvAddr: wallet0.Bytes, + Data: []byte(""), + GasLimit: 50_000, + GasPrice: 1_000_000_000, + ChainID: []byte(configs.ChainID), + Version: 1, + Signature: []byte("010101"), + } + _, err = chainSimulator.sendTx(ftx) + require.True(t, strings.Contains(err.Error(), errors.ErrInsufficientFunds.Error())) +} diff --git a/node/chainSimulator/components/api/fixedAPIInterface.go b/node/chainSimulator/components/api/fixedAPIInterface.go new file mode 100644 index 00000000000..2848be6ad15 --- /dev/null +++ b/node/chainSimulator/components/api/fixedAPIInterface.go @@ -0,0 +1,21 @@ +package api + +import "fmt" + +type fixedPortAPIConfigurator struct { + restAPIInterface string + mapShardPort map[uint32]int +} + +// NewFixedPortAPIConfigurator will create a new instance of fixedPortAPIConfigurator +func NewFixedPortAPIConfigurator(restAPIInterface string, mapShardPort map[uint32]int) *fixedPortAPIConfigurator { + return &fixedPortAPIConfigurator{ + restAPIInterface: restAPIInterface, + mapShardPort: mapShardPort, + } +} + +// RestApiInterface will return the api interface for the provided shard +func (f *fixedPortAPIConfigurator) RestApiInterface(shardID uint32) string { + return fmt.Sprintf("%s:%d", f.restAPIInterface, f.mapShardPort[shardID]) +} diff --git a/node/chainSimulator/components/api/fixedAPIInterface_test.go b/node/chainSimulator/components/api/fixedAPIInterface_test.go new file mode 100644 index 
00000000000..7348b717831 --- /dev/null +++ b/node/chainSimulator/components/api/fixedAPIInterface_test.go @@ -0,0 +1,20 @@ +package api + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +const apiInterface = "127.0.0.1:8080" + +func TestNewFixedPortAPIConfigurator(t *testing.T) { + t.Parallel() + + instance := NewFixedPortAPIConfigurator(apiInterface, map[uint32]int{0: 123}) + require.NotNil(t, instance) + + interf := instance.RestApiInterface(0) + require.Equal(t, fmt.Sprintf("%s:123", apiInterface), interf) +} diff --git a/node/chainSimulator/components/api/freeAPIInterface.go b/node/chainSimulator/components/api/freeAPIInterface.go new file mode 100644 index 00000000000..983ce0d93ca --- /dev/null +++ b/node/chainSimulator/components/api/freeAPIInterface.go @@ -0,0 +1,37 @@ +package api + +import ( + "fmt" + "net" +) + +type freePortAPIConfigurator struct { + restAPIInterface string +} + +// NewFreePortAPIConfigurator will create a new instance of freePortAPIConfigurator +func NewFreePortAPIConfigurator(restAPIInterface string) *freePortAPIConfigurator { + return &freePortAPIConfigurator{ + restAPIInterface: restAPIInterface, + } +} + +// RestApiInterface will return the rest api interface with a free port +func (f *freePortAPIConfigurator) RestApiInterface(_ uint32) string { + return fmt.Sprintf("%s:%d", f.restAPIInterface, getFreePort()) +} + +func getFreePort() int { + // Listen on port 0 to get a free port + l, err := net.Listen("tcp", "localhost:0") + if err != nil { + panic(err) + } + defer func() { + _ = l.Close() + }() + + // Get the port number that was assigned + addr := l.Addr().(*net.TCPAddr) + return addr.Port +} diff --git a/node/chainSimulator/components/api/freeAPIInterface_test.go b/node/chainSimulator/components/api/freeAPIInterface_test.go new file mode 100644 index 00000000000..0b215aa0a57 --- /dev/null +++ b/node/chainSimulator/components/api/freeAPIInterface_test.go @@ -0,0 +1,19 @@ +package api + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewFreePortAPIConfigurator(t *testing.T) { + t.Parallel() + + instance := NewFreePortAPIConfigurator(apiInterface) + require.NotNil(t, instance) + + interf := instance.RestApiInterface(0) + require.True(t, strings.Contains(interf, fmt.Sprintf("%s:", apiInterface))) +} diff --git a/node/chainSimulator/components/api/noApiInterface.go b/node/chainSimulator/components/api/noApiInterface.go new file mode 100644 index 00000000000..cd720c2511f --- /dev/null +++ b/node/chainSimulator/components/api/noApiInterface.go @@ -0,0 +1,15 @@ +package api + +import "github.com/multiversx/mx-chain-go/facade" + +type noAPIInterface struct{} + +// NewNoApiInterface will create a new instance of noAPIInterface +func NewNoApiInterface() *noAPIInterface { + return new(noAPIInterface) +} + +// RestApiInterface will return the value for disable api interface +func (n noAPIInterface) RestApiInterface(_ uint32) string { + return facade.DefaultRestPortOff +} diff --git a/node/chainSimulator/components/api/noApiInterface_test.go b/node/chainSimulator/components/api/noApiInterface_test.go new file mode 100644 index 00000000000..ee8efbc5783 --- /dev/null +++ b/node/chainSimulator/components/api/noApiInterface_test.go @@ -0,0 +1,18 @@ +package api + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/facade" + "github.com/stretchr/testify/require" +) + +func TestNewNoApiInterface(t 
*testing.T) { + t.Parallel() + + instance := NewNoApiInterface() + require.NotNil(t, instance) + + interf := instance.RestApiInterface(0) + require.Equal(t, facade.DefaultRestPortOff, interf) +} diff --git a/node/chainSimulator/components/bootstrapComponents.go b/node/chainSimulator/components/bootstrapComponents.go new file mode 100644 index 00000000000..7e0190ded2e --- /dev/null +++ b/node/chainSimulator/components/bootstrapComponents.go @@ -0,0 +1,159 @@ +package components + +import ( + "fmt" + "io" + + "github.com/multiversx/mx-chain-core-go/core" + nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/factory" + bootstrapComp "github.com/multiversx/mx-chain-go/factory/bootstrap" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) + +// ArgsBootstrapComponentsHolder will hold the components needed for the bootstrap components holders +type ArgsBootstrapComponentsHolder struct { + CoreComponents factory.CoreComponentsHolder + CryptoComponents factory.CryptoComponentsHolder + NetworkComponents factory.NetworkComponentsHolder + StatusCoreComponents factory.StatusCoreComponentsHolder + WorkingDir string + FlagsConfig config.ContextFlagsConfig + ImportDBConfig config.ImportDbConfig + PrefsConfig config.Preferences + Config config.Config + ShardIDStr string +} + +type bootstrapComponentsHolder struct { + epochStartBootstrapper factory.EpochStartBootstrapper + epochBootstrapParams factory.BootstrapParamsHolder + nodeType core.NodeType + shardCoordinator sharding.Coordinator + versionedHeaderFactory nodeFactory.VersionedHeaderFactory + headerVersionHandler nodeFactory.HeaderVersionHandler + headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + guardedAccountHandler process.GuardedAccountHandler + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory + managedBootstrapComponentsCloser io.Closer +} + +// CreateBootstrapComponents will create a new instance of bootstrap components holder +func CreateBootstrapComponents(args ArgsBootstrapComponentsHolder) (*bootstrapComponentsHolder, error) { + instance := &bootstrapComponentsHolder{} + + args.PrefsConfig.Preferences.DestinationShardAsObserver = args.ShardIDStr + + bootstrapComponentsFactoryArgs := bootstrapComp.BootstrapComponentsFactoryArgs{ + Config: args.Config, + PrefConfig: args.PrefsConfig, + ImportDbConfig: args.ImportDBConfig, + FlagsConfig: args.FlagsConfig, + WorkingDir: args.WorkingDir, + CoreComponents: args.CoreComponents, + CryptoComponents: args.CryptoComponents, + NetworkComponents: args.NetworkComponents, + StatusCoreComponents: args.StatusCoreComponents, + } + + bootstrapComponentsFactory, err := bootstrapComp.NewBootstrapComponentsFactory(bootstrapComponentsFactoryArgs) + if err != nil { + return nil, fmt.Errorf("NewBootstrapComponentsFactory failed: %w", err) + } + + managedBootstrapComponents, err := bootstrapComp.NewManagedBootstrapComponents(bootstrapComponentsFactory) + if err != nil { + return nil, err + } + + err = managedBootstrapComponents.Create() + if err != nil { + return nil, err + } + + instance.epochStartBootstrapper = managedBootstrapComponents.EpochStartBootstrapper() + instance.epochBootstrapParams = 
managedBootstrapComponents.EpochBootstrapParams() + instance.nodeType = managedBootstrapComponents.NodeType() + instance.shardCoordinator = managedBootstrapComponents.ShardCoordinator() + instance.versionedHeaderFactory = managedBootstrapComponents.VersionedHeaderFactory() + instance.headerVersionHandler = managedBootstrapComponents.HeaderVersionHandler() + instance.headerIntegrityVerifier = managedBootstrapComponents.HeaderIntegrityVerifier() + instance.guardedAccountHandler = managedBootstrapComponents.GuardedAccountHandler() + instance.nodesCoordinatorRegistryFactory = managedBootstrapComponents.NodesCoordinatorRegistryFactory() + instance.managedBootstrapComponentsCloser = managedBootstrapComponents + + return instance, nil +} + +// NodesCoordinatorRegistryFactory will return the nodes coordinator registry factory +func (b *bootstrapComponentsHolder) NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory { + return b.nodesCoordinatorRegistryFactory +} + +// EpochStartBootstrapper will return the epoch start bootstrapper +func (b *bootstrapComponentsHolder) EpochStartBootstrapper() factory.EpochStartBootstrapper { + return b.epochStartBootstrapper +} + +// EpochBootstrapParams will return the epoch bootstrap params +func (b *bootstrapComponentsHolder) EpochBootstrapParams() factory.BootstrapParamsHolder { + return b.epochBootstrapParams +} + +// NodeType will return the node type +func (b *bootstrapComponentsHolder) NodeType() core.NodeType { + return b.nodeType +} + +// ShardCoordinator will return the shardCoordinator +func (b *bootstrapComponentsHolder) ShardCoordinator() sharding.Coordinator { + return b.shardCoordinator +} + +// VersionedHeaderFactory will return the versioned header factory +func (b *bootstrapComponentsHolder) VersionedHeaderFactory() nodeFactory.VersionedHeaderFactory { + return b.versionedHeaderFactory +} + +// HeaderVersionHandler will return header version handler +func (b *bootstrapComponentsHolder) HeaderVersionHandler() nodeFactory.HeaderVersionHandler { + return b.headerVersionHandler +} + +// HeaderIntegrityVerifier will return header integrity verifier +func (b *bootstrapComponentsHolder) HeaderIntegrityVerifier() nodeFactory.HeaderIntegrityVerifierHandler { + return b.headerIntegrityVerifier +} + +// GuardedAccountHandler will return guarded account handler +func (b *bootstrapComponentsHolder) GuardedAccountHandler() process.GuardedAccountHandler { + return b.guardedAccountHandler +} + +// Close will call the Close methods on all inner components +func (b *bootstrapComponentsHolder) Close() error { + return b.managedBootstrapComponentsCloser.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (b *bootstrapComponentsHolder) IsInterfaceNil() bool { + return b == nil +} + +// Create will do nothing +func (b *bootstrapComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (b *bootstrapComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (b *bootstrapComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/bootstrapComponents_test.go b/node/chainSimulator/components/bootstrapComponents_test.go new file mode 100644 index 00000000000..7e4becdc52e --- /dev/null +++ b/node/chainSimulator/components/bootstrapComponents_test.go @@ -0,0 +1,200 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + 
"github.com/multiversx/mx-chain-core-go/data/typeConverters" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func createArgsBootstrapComponentsHolder() ArgsBootstrapComponentsHolder { + return ArgsBootstrapComponentsHolder{ + CoreComponents: &factory.CoreComponentsHolderStub{ + ChainIDCalled: func() string { + return "T" + }, + GenesisNodesSetupCalled: func() sharding.GenesisNodesSetupHandler { + return &genesisMocks.NodesSetupStub{} + }, + InternalMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{} + }, + EpochNotifierCalled: func() process.EpochNotifier { + return &epochNotifier.EpochNotifierStub{} + }, + EconomicsDataCalled: func() process.EconomicsDataHandler { + return &economicsmocks.EconomicsHandlerMock{} + }, + RaterCalled: func() sharding.PeerAccountListAndRatingHandler { + return &testscommon.RaterMock{} + }, + NodesShufflerCalled: func() nodesCoordinator.NodesShuffler { + return &shardingMocks.NodeShufflerMock{} + }, + RoundHandlerCalled: func() consensus.RoundHandler { + return &testscommon.RoundHandlerMock{} + }, + HasherCalled: func() hashing.Hasher { + return &testscommon.HasherStub{} + }, + PathHandlerCalled: func() storage.PathManagerHandler { + return &testscommon.PathManagerStub{} + }, + TxMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{} + }, + AddressPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{} + }, + Uint64ByteSliceConverterCalled: func() typeConverters.Uint64ByteSliceConverter { + return &mock.Uint64ByteSliceConverterMock{} + }, + TxSignHasherCalled: func() hashing.Hasher { + return &testscommon.HasherStub{} + }, + EnableEpochsHandlerCalled: func() common.EnableEpochsHandler { + return &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + }, + }, + CryptoComponents: &mock.CryptoComponentsStub{ + PubKey: &mock.PublicKeyMock{}, + BlockSig: &cryptoMocks.SingleSignerStub{}, + BlKeyGen: &cryptoMocks.KeyGenStub{}, + TxSig: &cryptoMocks.SingleSignerStub{}, + TxKeyGen: 
&cryptoMocks.KeyGenStub{}, + ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + }, + NetworkComponents: &mock.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + }, + StatusCoreComponents: &factory.StatusCoreComponentsStub{ + TrieSyncStatisticsField: &testscommon.SizeSyncStatisticsHandlerStub{}, + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: &testscommon.StateStatisticsHandlerStub{}, + }, + WorkingDir: ".", + FlagsConfig: config.ContextFlagsConfig{}, + ImportDBConfig: config.ImportDbConfig{}, + PrefsConfig: config.Preferences{}, + Config: config.Config{ + EpochStartConfig: config.EpochStartConfig{ + MinNumConnectedPeersToStart: 1, + MinNumOfPeersToConsiderBlockValid: 1, + }, + TrieSync: config.TrieSyncConfig{ + MaxHardCapForMissingNodes: 1, + NumConcurrentTrieSyncers: 1, + }, + GeneralSettings: config.GeneralSettingsConfig{ + SetGuardianEpochsDelay: 1, + }, + Versions: config.VersionsConfig{ + Cache: config.CacheConfig{ + Type: "LRU", + Capacity: 123, + }, + DefaultVersion: "1", + VersionsByEpochs: []config.VersionByEpochs{ + { + StartEpoch: 0, + Version: "1", + }, + }, + }, + WhiteListPool: config.CacheConfig{ + Type: "LRU", + Capacity: 123, + }, + }, + ShardIDStr: "0", + } +} + +func TestCreateBootstrapComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateBootstrapComponents(createArgsBootstrapComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewBootstrapComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsBootstrapComponentsHolder() + args.StatusCoreComponents = &factory.StatusCoreComponentsStub{} + comp, err := CreateBootstrapComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedBootstrapCreate failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsBootstrapComponentsHolder() + args.StatusCoreComponents = &factory.StatusCoreComponentsStub{ + TrieSyncStatisticsField: &testscommon.SizeSyncStatisticsHandlerStub{}, + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + } + comp, err := CreateBootstrapComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestBootstrapComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *bootstrapComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateBootstrapComponents(createArgsBootstrapComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestBootstrapComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateBootstrapComponents(createArgsBootstrapComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.EpochStartBootstrapper()) + require.NotNil(t, comp.EpochBootstrapParams()) + require.NotEmpty(t, comp.NodeType()) + require.NotNil(t, comp.ShardCoordinator()) + require.NotNil(t, comp.VersionedHeaderFactory()) + require.NotNil(t, comp.HeaderVersionHandler()) + require.NotNil(t, comp.HeaderIntegrityVerifier()) + require.NotNil(t, comp.GuardedAccountHandler()) + require.NotNil(t, comp.NodesCoordinatorRegistryFactory()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/closeHandler.go 
b/node/chainSimulator/components/closeHandler.go new file mode 100644 index 00000000000..19615b50210 --- /dev/null +++ b/node/chainSimulator/components/closeHandler.go @@ -0,0 +1,82 @@ +package components + +import ( + "errors" + "fmt" + "io" + "runtime/debug" + "strings" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" +) + +// ErrClose signals that a close error occurred +var ErrClose = errors.New("error while closing inner components") + +type errorlessCloser interface { + Close() +} + +type allCloser interface { + CloseAll() error +} + +type closeHandler struct { + mut sync.RWMutex + components []interface{} +} + +// NewCloseHandler create a new closeHandler instance +func NewCloseHandler() *closeHandler { + return &closeHandler{ + components: make([]interface{}, 0), + } +} + +// AddComponent will try to add a component to the inner list if that component is not nil +func (handler *closeHandler) AddComponent(component interface{}) { + if check.IfNilReflect(component) { + log.Error("programming error in closeHandler.AddComponent: nil component", "stack", string(debug.Stack())) + return + } + + handler.mut.Lock() + handler.components = append(handler.components, component) + handler.mut.Unlock() +} + +// Close will try to close all components, wrapping errors, if necessary +func (handler *closeHandler) Close() error { + handler.mut.RLock() + defer handler.mut.RUnlock() + + var errorStrings []string + for _, component := range handler.components { + var err error + + switch t := component.(type) { + case errorlessCloser: + t.Close() + case io.Closer: + err = t.Close() + case allCloser: + err = t.CloseAll() + } + + if err != nil { + errorStrings = append(errorStrings, fmt.Errorf("%w while closing the component of type %T", err, component).Error()) + } + } + + return AggregateErrors(errorStrings, ErrClose) +} + +// AggregateErrors can aggregate all provided error strings into a single error variable +func AggregateErrors(errorStrings []string, baseError error) error { + if len(errorStrings) == 0 { + return nil + } + + return fmt.Errorf("%w %s", baseError, strings.Join(errorStrings, ", ")) +} diff --git a/node/chainSimulator/components/closeHandler_test.go b/node/chainSimulator/components/closeHandler_test.go new file mode 100644 index 00000000000..f8a88576c3c --- /dev/null +++ b/node/chainSimulator/components/closeHandler_test.go @@ -0,0 +1,69 @@ +package components + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// localErrorlessCloser implements errorlessCloser interface +type localErrorlessCloser struct { + wasCalled bool +} + +// Close - +func (closer *localErrorlessCloser) Close() { + closer.wasCalled = true +} + +// localCloser implements io.Closer interface +type localCloser struct { + wasCalled bool + expectedError error +} + +// Close - +func (closer *localCloser) Close() error { + closer.wasCalled = true + return closer.expectedError +} + +// localCloseAllHandler implements allCloser interface +type localCloseAllHandler struct { + wasCalled bool + expectedError error +} + +// CloseAll - +func (closer *localCloseAllHandler) CloseAll() error { + closer.wasCalled = true + return closer.expectedError +} + +func TestCloseHandler(t *testing.T) { + t.Parallel() + + handler := NewCloseHandler() + require.NotNil(t, handler) + + handler.AddComponent(nil) // for coverage only + + lec := &localErrorlessCloser{} + handler.AddComponent(lec) + + lcNoError := &localCloser{} + handler.AddComponent(lcNoError) + + 
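+	// components whose Close/CloseAll returns an error must have that error folded into the aggregated error returned by handler.Close()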
lcWithError := &localCloser{expectedError: expectedErr} + handler.AddComponent(lcWithError) + + lcahNoError := &localCloseAllHandler{} + handler.AddComponent(lcahNoError) + + lcahWithError := &localCloseAllHandler{expectedError: expectedErr} + handler.AddComponent(lcahWithError) + + err := handler.Close() + require.True(t, strings.Contains(err.Error(), expectedErr.Error())) +} diff --git a/node/chainSimulator/components/coreComponents.go b/node/chainSimulator/components/coreComponents.go new file mode 100644 index 00000000000..49a7269d74b --- /dev/null +++ b/node/chainSimulator/components/coreComponents.go @@ -0,0 +1,459 @@ +package components + +import ( + "bytes" + "sync" + "time" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/enablers" + factoryPubKey "github.com/multiversx/mx-chain-go/common/factory" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/ntp" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/economics" + "github.com/multiversx/mx-chain-go/process/rating" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/statusHandler" + "github.com/multiversx/mx-chain-go/storage" + storageFactory "github.com/multiversx/mx-chain-go/storage/factory" + "github.com/multiversx/mx-chain-go/testscommon" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/nodetype" + "github.com/multiversx/mx-chain-core-go/core/versioning" + "github.com/multiversx/mx-chain-core-go/core/watchdog" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/typeConverters" + "github.com/multiversx/mx-chain-core-go/data/typeConverters/uint64ByteSlice" + "github.com/multiversx/mx-chain-core-go/hashing" + hashingFactory "github.com/multiversx/mx-chain-core-go/hashing/factory" + "github.com/multiversx/mx-chain-core-go/marshal" + marshalFactory "github.com/multiversx/mx-chain-core-go/marshal/factory" +) + +type coreComponentsHolder struct { + closeHandler *closeHandler + internalMarshaller marshal.Marshalizer + txMarshaller marshal.Marshalizer + vmMarshaller marshal.Marshalizer + hasher hashing.Hasher + txSignHasher hashing.Hasher + uint64SliceConverter typeConverters.Uint64ByteSliceConverter + addressPubKeyConverter core.PubkeyConverter + validatorPubKeyConverter core.PubkeyConverter + pathHandler storage.PathManagerHandler + watchdog core.WatchdogTimer + alarmScheduler core.TimersScheduler + syncTimer ntp.SyncTimer + roundHandler consensus.RoundHandler + economicsData process.EconomicsDataHandler + apiEconomicsData process.EconomicsDataHandler + ratingsData 
process.RatingsInfoHandler + rater sharding.PeerAccountListAndRatingHandler + genesisNodesSetup sharding.GenesisNodesSetupHandler + nodesShuffler nodesCoordinator.NodesShuffler + epochNotifier process.EpochNotifier + enableRoundsHandler process.EnableRoundsHandler + roundNotifier process.RoundNotifier + epochStartNotifierWithConfirm factory.EpochStartNotifierWithConfirm + chanStopNodeProcess chan endProcess.ArgEndProcess + genesisTime time.Time + chainID string + minTransactionVersion uint32 + txVersionChecker process.TxVersionCheckerHandler + encodedAddressLen uint32 + nodeTypeProvider core.NodeTypeProviderHandler + wasmVMChangeLocker common.Locker + processStatusHandler common.ProcessStatusHandler + hardforkTriggerPubKey []byte + enableEpochsHandler common.EnableEpochsHandler +} + +// ArgsCoreComponentsHolder will hold arguments needed for the core components holder +type ArgsCoreComponentsHolder struct { + Config config.Config + EnableEpochsConfig config.EnableEpochs + RoundsConfig config.RoundConfig + EconomicsConfig config.EconomicsConfig + RatingConfig config.RatingsConfig + ChanStopNodeProcess chan endProcess.ArgEndProcess + InitialRound int64 + NodesSetupPath string + GasScheduleFilename string + NumShards uint32 + WorkingDir string + + MinNodesPerShard uint32 + ConsensusGroupSize uint32 + MinNodesMeta uint32 + MetaChainConsensusGroupSize uint32 + RoundDurationInMs uint64 +} + +// CreateCoreComponents will create a new instance of factory.CoreComponentsHolder +func CreateCoreComponents(args ArgsCoreComponentsHolder) (*coreComponentsHolder, error) { + var err error + instance := &coreComponentsHolder{ + closeHandler: NewCloseHandler(), + } + + instance.internalMarshaller, err = marshalFactory.NewMarshalizer(args.Config.Marshalizer.Type) + if err != nil { + return nil, err + } + instance.txMarshaller, err = marshalFactory.NewMarshalizer(args.Config.TxSignMarshalizer.Type) + if err != nil { + return nil, err + } + instance.vmMarshaller, err = marshalFactory.NewMarshalizer(args.Config.VmMarshalizer.Type) + if err != nil { + return nil, err + } + instance.hasher, err = hashingFactory.NewHasher(args.Config.Hasher.Type) + if err != nil { + return nil, err + } + instance.txSignHasher, err = hashingFactory.NewHasher(args.Config.TxSignHasher.Type) + if err != nil { + return nil, err + } + instance.uint64SliceConverter = uint64ByteSlice.NewBigEndianConverter() + instance.addressPubKeyConverter, err = factoryPubKey.NewPubkeyConverter(args.Config.AddressPubkeyConverter) + if err != nil { + return nil, err + } + instance.validatorPubKeyConverter, err = factoryPubKey.NewPubkeyConverter(args.Config.ValidatorPubkeyConverter) + if err != nil { + return nil, err + } + + instance.pathHandler, err = storageFactory.CreatePathManager( + storageFactory.ArgCreatePathManager{ + WorkingDir: args.WorkingDir, + ChainID: args.Config.GeneralSettings.ChainID, + }, + ) + if err != nil { + return nil, err + } + + instance.watchdog = &watchdog.DisabledWatchdog{} + instance.alarmScheduler = &mock.AlarmSchedulerStub{} + instance.syncTimer = &testscommon.SyncTimerStub{} + + instance.genesisNodesSetup, err = sharding.NewNodesSetup(args.NodesSetupPath, instance.addressPubKeyConverter, instance.validatorPubKeyConverter, args.NumShards) + if err != nil { + return nil, err + } + + roundDuration := time.Millisecond * time.Duration(instance.genesisNodesSetup.GetRoundDuration()) + instance.roundHandler = NewManualRoundHandler(instance.genesisNodesSetup.GetStartTime(), roundDuration, args.InitialRound) + + 
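	// The manual round handler defined later in this change (manualRoundHandler.go) does not
	// tick on its own: rounds advance only through IncrementIndex, and TimeStamp resolves to
	// roughly genesisTime + roundDuration*(index - initialRound), so simulated chain time is
	// driven entirely by the caller.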
instance.wasmVMChangeLocker = &sync.RWMutex{} + instance.txVersionChecker = versioning.NewTxVersionChecker(args.Config.GeneralSettings.MinTransactionVersion) + instance.epochNotifier = forking.NewGenericEpochNotifier() + instance.enableEpochsHandler, err = enablers.NewEnableEpochsHandler(args.EnableEpochsConfig, instance.epochNotifier) + if err != nil { + return nil, err + } + + if err != nil { + return nil, err + } + + argsEconomicsHandler := economics.ArgsNewEconomicsData{ + TxVersionChecker: instance.txVersionChecker, + Economics: &args.EconomicsConfig, + EpochNotifier: instance.epochNotifier, + EnableEpochsHandler: instance.enableEpochsHandler, + } + + instance.economicsData, err = economics.NewEconomicsData(argsEconomicsHandler) + if err != nil { + return nil, err + } + instance.apiEconomicsData = instance.economicsData + + instance.ratingsData, err = rating.NewRatingsData(rating.RatingsDataArg{ + Config: args.RatingConfig, + ShardConsensusSize: args.ConsensusGroupSize, + MetaConsensusSize: args.MetaChainConsensusGroupSize, + ShardMinNodes: args.MinNodesPerShard, + MetaMinNodes: args.MinNodesMeta, + RoundDurationMiliseconds: args.RoundDurationInMs, + }) + if err != nil { + return nil, err + } + + instance.rater, err = rating.NewBlockSigningRater(instance.ratingsData) + if err != nil { + return nil, err + } + + instance.nodesShuffler, err = nodesCoordinator.NewHashValidatorsShuffler(&nodesCoordinator.NodesShufflerArgs{ + NodesShard: args.MinNodesPerShard, + NodesMeta: args.MinNodesMeta, + Hysteresis: 0, + Adaptivity: false, + ShuffleBetweenShards: true, + MaxNodesEnableConfig: args.EnableEpochsConfig.MaxNodesChangeEnableEpoch, + EnableEpochsHandler: instance.enableEpochsHandler, + EnableEpochs: args.EnableEpochsConfig, + }) + if err != nil { + return nil, err + } + + instance.roundNotifier = forking.NewGenericRoundNotifier() + instance.enableRoundsHandler, err = enablers.NewEnableRoundsHandler(args.RoundsConfig, instance.roundNotifier) + if err != nil { + return nil, err + } + + instance.epochStartNotifierWithConfirm = notifier.NewEpochStartSubscriptionHandler() + instance.chanStopNodeProcess = args.ChanStopNodeProcess + instance.genesisTime = time.Unix(instance.genesisNodesSetup.GetStartTime(), 0) + instance.chainID = args.Config.GeneralSettings.ChainID + instance.minTransactionVersion = args.Config.GeneralSettings.MinTransactionVersion + instance.encodedAddressLen, err = computeEncodedAddressLen(instance.addressPubKeyConverter) + if err != nil { + return nil, err + } + + instance.nodeTypeProvider = nodetype.NewNodeTypeProvider(core.NodeTypeObserver) + instance.processStatusHandler = statusHandler.NewProcessStatusHandler() + + pubKeyBytes, err := instance.validatorPubKeyConverter.Decode(args.Config.Hardfork.PublicKeyToListenFrom) + if err != nil { + return nil, err + } + instance.hardforkTriggerPubKey = pubKeyBytes + + instance.collectClosableComponents() + + return instance, nil +} + +func computeEncodedAddressLen(converter core.PubkeyConverter) (uint32, error) { + emptyAddress := bytes.Repeat([]byte{0}, converter.Len()) + encodedEmptyAddress, err := converter.Encode(emptyAddress) + if err != nil { + return 0, err + } + + return uint32(len(encodedEmptyAddress)), nil +} + +// InternalMarshalizer will return the internal marshaller +func (c *coreComponentsHolder) InternalMarshalizer() marshal.Marshalizer { + return c.internalMarshaller +} + +// SetInternalMarshalizer will set the internal marshaller +func (c *coreComponentsHolder) SetInternalMarshalizer(marshalizer marshal.Marshalizer) 
error { + c.internalMarshaller = marshalizer + return nil +} + +// TxMarshalizer will return the transaction marshaller +func (c *coreComponentsHolder) TxMarshalizer() marshal.Marshalizer { + return c.txMarshaller +} + +// VmMarshalizer will return the vm marshaller +func (c *coreComponentsHolder) VmMarshalizer() marshal.Marshalizer { + return c.vmMarshaller +} + +// Hasher will return the hasher +func (c *coreComponentsHolder) Hasher() hashing.Hasher { + return c.hasher +} + +// TxSignHasher will return the transaction sign hasher +func (c *coreComponentsHolder) TxSignHasher() hashing.Hasher { + return c.txSignHasher +} + +// Uint64ByteSliceConverter will return the uint64 to slice converter +func (c *coreComponentsHolder) Uint64ByteSliceConverter() typeConverters.Uint64ByteSliceConverter { + return c.uint64SliceConverter +} + +// AddressPubKeyConverter will return the address pub key converter +func (c *coreComponentsHolder) AddressPubKeyConverter() core.PubkeyConverter { + return c.addressPubKeyConverter +} + +// ValidatorPubKeyConverter will return the validator pub key converter +func (c *coreComponentsHolder) ValidatorPubKeyConverter() core.PubkeyConverter { + return c.validatorPubKeyConverter +} + +// PathHandler will return the path handler +func (c *coreComponentsHolder) PathHandler() storage.PathManagerHandler { + return c.pathHandler +} + +// Watchdog will return the watch dog +func (c *coreComponentsHolder) Watchdog() core.WatchdogTimer { + return c.watchdog +} + +// AlarmScheduler will return the alarm scheduler +func (c *coreComponentsHolder) AlarmScheduler() core.TimersScheduler { + return c.alarmScheduler +} + +// SyncTimer will return the sync timer +func (c *coreComponentsHolder) SyncTimer() ntp.SyncTimer { + return c.syncTimer +} + +// RoundHandler will return the round handler +func (c *coreComponentsHolder) RoundHandler() consensus.RoundHandler { + return c.roundHandler +} + +// EconomicsData will return the economics data handler +func (c *coreComponentsHolder) EconomicsData() process.EconomicsDataHandler { + return c.economicsData +} + +// APIEconomicsData will return the api economics data handler +func (c *coreComponentsHolder) APIEconomicsData() process.EconomicsDataHandler { + return c.apiEconomicsData +} + +// RatingsData will return the ratings data handler +func (c *coreComponentsHolder) RatingsData() process.RatingsInfoHandler { + return c.ratingsData +} + +// Rater will return the rater handler +func (c *coreComponentsHolder) Rater() sharding.PeerAccountListAndRatingHandler { + return c.rater +} + +// GenesisNodesSetup will return the genesis nodes setup handler +func (c *coreComponentsHolder) GenesisNodesSetup() sharding.GenesisNodesSetupHandler { + return c.genesisNodesSetup +} + +// NodesShuffler will return the nodes shuffler +func (c *coreComponentsHolder) NodesShuffler() nodesCoordinator.NodesShuffler { + return c.nodesShuffler +} + +// EpochNotifier will return the epoch notifier +func (c *coreComponentsHolder) EpochNotifier() process.EpochNotifier { + return c.epochNotifier +} + +// EnableRoundsHandler will return the enable rounds handler +func (c *coreComponentsHolder) EnableRoundsHandler() process.EnableRoundsHandler { + return c.enableRoundsHandler +} + +// RoundNotifier will return the round notifier +func (c *coreComponentsHolder) RoundNotifier() process.RoundNotifier { + return c.roundNotifier +} + +// EpochStartNotifierWithConfirm will return the epoch start notifier with confirm +func (c *coreComponentsHolder) EpochStartNotifierWithConfirm() 
factory.EpochStartNotifierWithConfirm { + return c.epochStartNotifierWithConfirm +} + +// ChanStopNodeProcess will return the channel for stop node process +func (c *coreComponentsHolder) ChanStopNodeProcess() chan endProcess.ArgEndProcess { + return c.chanStopNodeProcess +} + +// GenesisTime will return the genesis time +func (c *coreComponentsHolder) GenesisTime() time.Time { + return c.genesisTime +} + +// ChainID will return the chain id +func (c *coreComponentsHolder) ChainID() string { + return c.chainID +} + +// MinTransactionVersion will return the min transaction version +func (c *coreComponentsHolder) MinTransactionVersion() uint32 { + return c.minTransactionVersion +} + +// TxVersionChecker will return the tx version checker +func (c *coreComponentsHolder) TxVersionChecker() process.TxVersionCheckerHandler { + return c.txVersionChecker +} + +// EncodedAddressLen will return the len of encoded address +func (c *coreComponentsHolder) EncodedAddressLen() uint32 { + return c.encodedAddressLen +} + +// NodeTypeProvider will return the node type provider +func (c *coreComponentsHolder) NodeTypeProvider() core.NodeTypeProviderHandler { + return c.nodeTypeProvider +} + +// WasmVMChangeLocker will return the wasm vm change locker +func (c *coreComponentsHolder) WasmVMChangeLocker() common.Locker { + return c.wasmVMChangeLocker +} + +// ProcessStatusHandler will return the process status handler +func (c *coreComponentsHolder) ProcessStatusHandler() common.ProcessStatusHandler { + return c.processStatusHandler +} + +// HardforkTriggerPubKey will return the pub key for the hard fork trigger +func (c *coreComponentsHolder) HardforkTriggerPubKey() []byte { + return c.hardforkTriggerPubKey +} + +// EnableEpochsHandler will return the enable epoch handler +func (c *coreComponentsHolder) EnableEpochsHandler() common.EnableEpochsHandler { + return c.enableEpochsHandler +} + +func (c *coreComponentsHolder) collectClosableComponents() { + c.closeHandler.AddComponent(c.alarmScheduler) + c.closeHandler.AddComponent(c.syncTimer) +} + +// Close will call the Close methods on all inner components +func (c *coreComponentsHolder) Close() error { + return c.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (c *coreComponentsHolder) IsInterfaceNil() bool { + return c == nil +} + +// Create will do nothing +func (c *coreComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (c *coreComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (c *coreComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/coreComponents_test.go b/node/chainSimulator/components/coreComponents_test.go new file mode 100644 index 00000000000..7056d9ae48c --- /dev/null +++ b/node/chainSimulator/components/coreComponents_test.go @@ -0,0 +1,306 @@ +package components + +import ( + "encoding/hex" + "testing" + + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/stretchr/testify/require" + + "github.com/multiversx/mx-chain-go/config" +) + +func createArgsCoreComponentsHolder() ArgsCoreComponentsHolder { + return ArgsCoreComponentsHolder{ + Config: config.Config{ + Marshalizer: config.MarshalizerConfig{ + Type: "json", + }, + TxSignMarshalizer: config.TypeConfig{ + Type: "json", + }, + VmMarshalizer: config.TypeConfig{ + Type: "json", + }, + Hasher: config.TypeConfig{ + Type: "blake2b", + }, + 
TxSignHasher: config.TypeConfig{ + Type: "blake2b", + }, + AddressPubkeyConverter: config.PubkeyConfig{ + Length: 32, + Type: "hex", + }, + ValidatorPubkeyConverter: config.PubkeyConfig{ + Length: 128, + Type: "hex", + }, + GeneralSettings: config.GeneralSettingsConfig{ + ChainID: "T", + MinTransactionVersion: 1, + }, + Hardfork: config.HardforkConfig{ + PublicKeyToListenFrom: "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + }, + }, + EnableEpochsConfig: config.EnableEpochs{}, + RoundsConfig: config.RoundConfig{ + RoundActivations: map[string]config.ActivationRoundByName{ + "DisableAsyncCallV1": { + Round: "18446744073709551615", + }, + }, + }, + EconomicsConfig: config.EconomicsConfig{ + GlobalSettings: config.GlobalSettings{ + GenesisTotalSupply: "2000000000000000000000", + MinimumInflation: 0, + YearSettings: []*config.YearSetting{ + { + Year: 0, + MaximumInflation: 0.01, + }, + }, + }, + FeeSettings: config.FeeSettings{ + GasLimitSettings: []config.GasLimitSetting{ + { + MaxGasLimitPerBlock: "10000000000", + MaxGasLimitPerMiniBlock: "10000000000", + MaxGasLimitPerMetaBlock: "10000000000", + MaxGasLimitPerMetaMiniBlock: "10000000000", + MaxGasLimitPerTx: "10000000000", + MinGasLimit: "10", + ExtraGasLimitGuardedTx: "50000", + }, + }, + GasPriceModifier: 0.01, + MinGasPrice: "100", + GasPerDataByte: "1", + MaxGasPriceSetGuardian: "100", + }, + RewardsSettings: config.RewardsSettings{ + RewardsConfigByEpoch: []config.EpochRewardSettings{ + { + LeaderPercentage: 0.1, + DeveloperPercentage: 0.1, + ProtocolSustainabilityPercentage: 0.1, + ProtocolSustainabilityAddress: "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp", + TopUpGradientPoint: "300000000000000000000", + TopUpFactor: 0.25, + EpochEnable: 0, + }, + }, + }, + }, + RatingConfig: config.RatingsConfig{ + General: config.General{ + StartRating: 4000, + MaxRating: 10000, + MinRating: 1, + SignedBlocksThreshold: 0.025, + SelectionChances: []*config.SelectionChance{ + {MaxThreshold: 0, ChancePercent: 1}, + {MaxThreshold: 1, ChancePercent: 2}, + {MaxThreshold: 10000, ChancePercent: 4}, + }, + }, + ShardChain: config.ShardChain{ + RatingSteps: config.RatingSteps{ + HoursToMaxRatingFromStartRating: 2, + ProposerValidatorImportance: 1, + ProposerDecreaseFactor: -4, + ValidatorDecreaseFactor: -4, + ConsecutiveMissedBlocksPenalty: 1.2, + }, + }, + MetaChain: config.MetaChain{ + RatingSteps: config.RatingSteps{ + HoursToMaxRatingFromStartRating: 2, + ProposerValidatorImportance: 1, + ProposerDecreaseFactor: -4, + ValidatorDecreaseFactor: -4, + ConsecutiveMissedBlocksPenalty: 1.3, + }, + }, + }, + ChanStopNodeProcess: make(chan endProcess.ArgEndProcess), + InitialRound: 0, + NodesSetupPath: "../../../sharding/mock/testdata/nodesSetupMock.json", + GasScheduleFilename: "../../../cmd/node/config/gasSchedules/gasScheduleV8.toml", + NumShards: 3, + WorkingDir: ".", + MinNodesPerShard: 1, + MinNodesMeta: 1, + ConsensusGroupSize: 1, + MetaChainConsensusGroupSize: 1, + RoundDurationInMs: 6000, + } +} + +func TestCreateCoreComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateCoreComponents(createArgsCoreComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + 
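	// Each failure subtest below follows the same pattern: start from the valid arguments
	// returned by createArgsCoreComponentsHolder, invalidate a single field, then assert that
	// CreateCoreComponents returns a nil holder together with a non-nil error, covering the
	// corresponding early-return branch of the constructor.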
t.Run("internal NewMarshalizer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.Marshalizer.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("tx NewMarshalizer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.TxSignMarshalizer.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("vm NewMarshalizer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.VmMarshalizer.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("main NewHasher failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.Hasher.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("tx NewHasher failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.TxSignHasher.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("address NewPubkeyConverter failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.AddressPubkeyConverter.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("validator NewPubkeyConverter failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.ValidatorPubkeyConverter.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewNodesSetup failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.NumShards = 0 + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewEconomicsData failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.EconomicsConfig.GlobalSettings.MinimumInflation = -1.0 + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("validatorPubKeyConverter.Decode failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.Hardfork.PublicKeyToListenFrom = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestCoreComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *coreComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateCoreComponents(createArgsCoreComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestCoreComponents_GettersSetters(t *testing.T) { + t.Parallel() + + comp, err := CreateCoreComponents(createArgsCoreComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.InternalMarshalizer()) + require.Nil(t, comp.SetInternalMarshalizer(nil)) + require.Nil(t, comp.InternalMarshalizer()) + + require.NotNil(t, comp.TxMarshalizer()) + require.NotNil(t, comp.VmMarshalizer()) + require.NotNil(t, comp.Hasher()) + require.NotNil(t, comp.TxSignHasher()) + require.NotNil(t, 
comp.Uint64ByteSliceConverter()) + require.NotNil(t, comp.AddressPubKeyConverter()) + require.NotNil(t, comp.ValidatorPubKeyConverter()) + require.NotNil(t, comp.PathHandler()) + require.NotNil(t, comp.Watchdog()) + require.NotNil(t, comp.AlarmScheduler()) + require.NotNil(t, comp.SyncTimer()) + require.NotNil(t, comp.RoundHandler()) + require.NotNil(t, comp.EconomicsData()) + require.NotNil(t, comp.APIEconomicsData()) + require.NotNil(t, comp.RatingsData()) + require.NotNil(t, comp.Rater()) + require.NotNil(t, comp.GenesisNodesSetup()) + require.NotNil(t, comp.NodesShuffler()) + require.NotNil(t, comp.EpochNotifier()) + require.NotNil(t, comp.EnableRoundsHandler()) + require.NotNil(t, comp.RoundNotifier()) + require.NotNil(t, comp.EpochStartNotifierWithConfirm()) + require.NotNil(t, comp.ChanStopNodeProcess()) + require.NotNil(t, comp.GenesisTime()) + require.Equal(t, "T", comp.ChainID()) + require.Equal(t, uint32(1), comp.MinTransactionVersion()) + require.NotNil(t, comp.TxVersionChecker()) + require.Equal(t, uint32(64), comp.EncodedAddressLen()) + hfPk, _ := hex.DecodeString("41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081") + require.Equal(t, hfPk, comp.HardforkTriggerPubKey()) + require.NotNil(t, comp.NodeTypeProvider()) + require.NotNil(t, comp.WasmVMChangeLocker()) + require.NotNil(t, comp.ProcessStatusHandler()) + require.NotNil(t, comp.ProcessStatusHandler()) + require.NotNil(t, comp.EnableEpochsHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/cryptoComponents.go b/node/chainSimulator/components/cryptoComponents.go new file mode 100644 index 00000000000..3fcd7e205b7 --- /dev/null +++ b/node/chainSimulator/components/cryptoComponents.go @@ -0,0 +1,269 @@ +package components + +import ( + "fmt" + "io" + + "github.com/multiversx/mx-chain-core-go/core" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-crypto-go/signing/disabled/singlesig" + "github.com/multiversx/mx-chain-go/common" + cryptoCommon "github.com/multiversx/mx-chain-go/common/crypto" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/factory" + cryptoComp "github.com/multiversx/mx-chain-go/factory/crypto" + "github.com/multiversx/mx-chain-go/vm" +) + +// ArgsCryptoComponentsHolder holds all arguments needed to create a crypto components holder +type ArgsCryptoComponentsHolder struct { + Config config.Config + EnableEpochsConfig config.EnableEpochs + Preferences config.Preferences + CoreComponentsHolder factory.CoreComponentsHolder + AllValidatorKeysPemFileName string + BypassTxSignatureCheck bool +} + +type cryptoComponentsHolder struct { + publicKey crypto.PublicKey + privateKey crypto.PrivateKey + p2pPublicKey crypto.PublicKey + p2pPrivateKey crypto.PrivateKey + p2pSingleSigner crypto.SingleSigner + txSingleSigner crypto.SingleSigner + blockSigner crypto.SingleSigner + multiSignerContainer cryptoCommon.MultiSignerContainer + peerSignatureHandler crypto.PeerSignatureHandler + blockSignKeyGen crypto.KeyGenerator + 
txSignKeyGen crypto.KeyGenerator + p2pKeyGen crypto.KeyGenerator + messageSignVerifier vm.MessageSignVerifier + consensusSigningHandler consensus.SigningHandler + managedPeersHolder common.ManagedPeersHolder + keysHandler consensus.KeysHandler + publicKeyBytes []byte + publicKeyString string + managedCryptoComponentsCloser io.Closer +} + +// CreateCryptoComponents will create a new instance of cryptoComponentsHolder +func CreateCryptoComponents(args ArgsCryptoComponentsHolder) (*cryptoComponentsHolder, error) { + instance := &cryptoComponentsHolder{} + + cryptoComponentsHandlerArgs := cryptoComp.CryptoComponentsFactoryArgs{ + Config: args.Config, + EnableEpochs: args.EnableEpochsConfig, + PrefsConfig: args.Preferences, + CoreComponentsHolder: args.CoreComponentsHolder, + KeyLoader: core.NewKeyLoader(), + ActivateBLSPubKeyMessageVerification: false, + IsInImportMode: false, + ImportModeNoSigCheck: false, + // set validator key pem file with a file that doesn't exist to all validators key pem file + ValidatorKeyPemFileName: "missing.pem", + AllValidatorKeysPemFileName: args.AllValidatorKeysPemFileName, + } + + cryptoComponentsFactory, err := cryptoComp.NewCryptoComponentsFactory(cryptoComponentsHandlerArgs) + if err != nil { + return nil, fmt.Errorf("NewCryptoComponentsFactory failed: %w", err) + } + + managedCryptoComponents, err := cryptoComp.NewManagedCryptoComponents(cryptoComponentsFactory) + if err != nil { + return nil, err + } + + err = managedCryptoComponents.Create() + if err != nil { + return nil, err + } + + instance.publicKey = managedCryptoComponents.PublicKey() + instance.privateKey = managedCryptoComponents.PrivateKey() + instance.publicKeyBytes, err = instance.publicKey.ToByteArray() + if err != nil { + return nil, err + } + instance.publicKeyString, err = args.CoreComponentsHolder.ValidatorPubKeyConverter().Encode(instance.publicKeyBytes) + if err != nil { + return nil, err + } + + instance.p2pPublicKey = managedCryptoComponents.P2pPublicKey() + instance.p2pPrivateKey = managedCryptoComponents.P2pPrivateKey() + instance.p2pSingleSigner = managedCryptoComponents.P2pSingleSigner() + instance.blockSigner = managedCryptoComponents.BlockSigner() + + instance.multiSignerContainer = managedCryptoComponents.MultiSignerContainer() + instance.peerSignatureHandler = managedCryptoComponents.PeerSignatureHandler() + instance.blockSignKeyGen = managedCryptoComponents.BlockSignKeyGen() + instance.txSignKeyGen = managedCryptoComponents.TxSignKeyGen() + instance.p2pKeyGen = managedCryptoComponents.P2pKeyGen() + instance.messageSignVerifier = managedCryptoComponents.MessageSignVerifier() + instance.consensusSigningHandler = managedCryptoComponents.ConsensusSigningHandler() + instance.managedPeersHolder = managedCryptoComponents.ManagedPeersHolder() + instance.keysHandler = managedCryptoComponents.KeysHandler() + instance.managedCryptoComponentsCloser = managedCryptoComponents + + if args.BypassTxSignatureCheck { + instance.txSingleSigner = &singlesig.DisabledSingleSig{} + } else { + instance.txSingleSigner = managedCryptoComponents.TxSingleSigner() + } + + return instance, nil +} + +// PublicKey will return the public key +func (c *cryptoComponentsHolder) PublicKey() crypto.PublicKey { + return c.publicKey +} + +// PrivateKey will return the private key +func (c *cryptoComponentsHolder) PrivateKey() crypto.PrivateKey { + return c.privateKey +} + +// PublicKeyString will return the private key string +func (c *cryptoComponentsHolder) PublicKeyString() string { + return c.publicKeyString +} + 
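// Illustrative, test-style sketch (not part of this change) of what the
// BypassTxSignatureCheck flag handled in CreateCryptoComponents above amounts to; it
// assumes the createArgsCryptoComponentsHolder helper defined in cryptoComponents_test.go
// further below, plus the usual testing and require imports.
func sketchBypassTxSignatureCheck(t *testing.T) {
	args := createArgsCryptoComponentsHolder()
	args.BypassTxSignatureCheck = true

	comp, err := CreateCryptoComponents(args)
	require.NoError(t, err)

	// with the bypass enabled, the holder wires the disabled single signer, so transaction
	// signature verification becomes a no-op inside the simulator
	_, isDisabled := comp.TxSingleSigner().(*singlesig.DisabledSingleSig)
	require.True(t, isDisabled)

	require.NoError(t, comp.Close())
}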
+// PublicKeyBytes will return the public key bytes +func (c *cryptoComponentsHolder) PublicKeyBytes() []byte { + return c.publicKeyBytes +} + +// P2pPublicKey will return the p2p public key +func (c *cryptoComponentsHolder) P2pPublicKey() crypto.PublicKey { + return c.p2pPublicKey +} + +// P2pPrivateKey will return the p2p private key +func (c *cryptoComponentsHolder) P2pPrivateKey() crypto.PrivateKey { + return c.p2pPrivateKey +} + +// P2pSingleSigner will return the p2p single signer +func (c *cryptoComponentsHolder) P2pSingleSigner() crypto.SingleSigner { + return c.p2pSingleSigner +} + +// TxSingleSigner will return the transaction single signer +func (c *cryptoComponentsHolder) TxSingleSigner() crypto.SingleSigner { + return c.txSingleSigner +} + +// BlockSigner will return the block signer +func (c *cryptoComponentsHolder) BlockSigner() crypto.SingleSigner { + return c.blockSigner +} + +// SetMultiSignerContainer will set the multi signer container +func (c *cryptoComponentsHolder) SetMultiSignerContainer(container cryptoCommon.MultiSignerContainer) error { + c.multiSignerContainer = container + + return nil +} + +// MultiSignerContainer will return the multi signer container +func (c *cryptoComponentsHolder) MultiSignerContainer() cryptoCommon.MultiSignerContainer { + return c.multiSignerContainer +} + +// GetMultiSigner will return the multi signer by epoch +func (c *cryptoComponentsHolder) GetMultiSigner(epoch uint32) (crypto.MultiSigner, error) { + return c.MultiSignerContainer().GetMultiSigner(epoch) +} + +// PeerSignatureHandler will return the peer signature handler +func (c *cryptoComponentsHolder) PeerSignatureHandler() crypto.PeerSignatureHandler { + return c.peerSignatureHandler +} + +// BlockSignKeyGen will return the block signer key generator +func (c *cryptoComponentsHolder) BlockSignKeyGen() crypto.KeyGenerator { + return c.blockSignKeyGen +} + +// TxSignKeyGen will return the transaction sign key generator +func (c *cryptoComponentsHolder) TxSignKeyGen() crypto.KeyGenerator { + return c.txSignKeyGen +} + +// P2pKeyGen will return the p2p key generator +func (c *cryptoComponentsHolder) P2pKeyGen() crypto.KeyGenerator { + return c.p2pKeyGen +} + +// MessageSignVerifier will return the message signature verifier +func (c *cryptoComponentsHolder) MessageSignVerifier() vm.MessageSignVerifier { + return c.messageSignVerifier +} + +// ConsensusSigningHandler will return the consensus signing handler +func (c *cryptoComponentsHolder) ConsensusSigningHandler() consensus.SigningHandler { + return c.consensusSigningHandler +} + +// ManagedPeersHolder will return the managed peer holder +func (c *cryptoComponentsHolder) ManagedPeersHolder() common.ManagedPeersHolder { + return c.managedPeersHolder +} + +// KeysHandler will return the keys handler +func (c *cryptoComponentsHolder) KeysHandler() consensus.KeysHandler { + return c.keysHandler +} + +// Clone will clone the cryptoComponentsHolder +func (c *cryptoComponentsHolder) Clone() interface{} { + return &cryptoComponentsHolder{ + publicKey: c.PublicKey(), + privateKey: c.PrivateKey(), + p2pPublicKey: c.P2pPublicKey(), + p2pPrivateKey: c.P2pPrivateKey(), + p2pSingleSigner: c.P2pSingleSigner(), + txSingleSigner: c.TxSingleSigner(), + blockSigner: c.BlockSigner(), + multiSignerContainer: c.MultiSignerContainer(), + peerSignatureHandler: c.PeerSignatureHandler(), + blockSignKeyGen: c.BlockSignKeyGen(), + txSignKeyGen: c.TxSignKeyGen(), + p2pKeyGen: c.P2pKeyGen(), + messageSignVerifier: c.MessageSignVerifier(), + 
consensusSigningHandler: c.ConsensusSigningHandler(), + managedPeersHolder: c.ManagedPeersHolder(), + keysHandler: c.KeysHandler(), + publicKeyBytes: c.PublicKeyBytes(), + publicKeyString: c.PublicKeyString(), + managedCryptoComponentsCloser: c.managedCryptoComponentsCloser, + } +} + +func (c *cryptoComponentsHolder) IsInterfaceNil() bool { + return c == nil +} + +// Create will do nothing +func (c *cryptoComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (c *cryptoComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (c *cryptoComponentsHolder) String() string { + return "" +} + +// Close will do nothing +func (c *cryptoComponentsHolder) Close() error { + return c.managedCryptoComponentsCloser.Close() +} diff --git a/node/chainSimulator/components/cryptoComponents_test.go b/node/chainSimulator/components/cryptoComponents_test.go new file mode 100644 index 00000000000..3bba81c9b91 --- /dev/null +++ b/node/chainSimulator/components/cryptoComponents_test.go @@ -0,0 +1,168 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/stretchr/testify/require" +) + +func createArgsCryptoComponentsHolder() ArgsCryptoComponentsHolder { + return ArgsCryptoComponentsHolder{ + Config: config.Config{ + Consensus: config.ConsensusConfig{ + Type: "bls", + }, + MultisigHasher: config.TypeConfig{ + Type: "blake2b", + }, + PublicKeyPIDSignature: config.CacheConfig{ + Capacity: 1000, + Type: "LRU", + }, + }, + EnableEpochsConfig: config.EnableEpochs{ + BLSMultiSignerEnableEpoch: []config.MultiSignerConfig{ + { + EnableEpoch: 0, + Type: "no-KOSK", + }, + { + EnableEpoch: 10, + Type: "KOSK", + }, + }, + }, + Preferences: config.Preferences{}, + CoreComponentsHolder: &factory.CoreComponentsHolderStub{ + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{ + EncodeCalled: func(pkBytes []byte) (string, error) { + return "public key", nil + }, + } + }, + }, + AllValidatorKeysPemFileName: "allValidatorKeys.pem", + BypassTxSignatureCheck: true, + } +} + +func TestCreateCryptoComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("should work with bypass tx sig check", func(t *testing.T) { + t.Parallel() + + args := createArgsCryptoComponentsHolder() + args.BypassTxSignatureCheck = true + comp, err := CreateCryptoComponents(args) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewCryptoComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCryptoComponentsHolder() + args.CoreComponentsHolder = &factory.CoreComponentsHolderStub{ + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return nil + }, + } + comp, err := CreateCryptoComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedCryptoComponents.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := 
createArgsCryptoComponentsHolder() + args.CoreComponentsHolder = &factory.CoreComponentsHolderStub{ + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{ + EncodeCalled: func(pkBytes []byte) (string, error) { + return "", expectedErr + }, + } + }, + } + comp, err := CreateCryptoComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestCryptoComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *cryptoComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestCryptoComponentsHolder_GettersSetters(t *testing.T) { + t.Parallel() + + comp, err := CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.PublicKey()) + require.NotNil(t, comp.PrivateKey()) + require.NotEmpty(t, comp.PublicKeyString()) + require.NotEmpty(t, comp.PublicKeyBytes()) + require.NotNil(t, comp.P2pPublicKey()) + require.NotNil(t, comp.P2pPrivateKey()) + require.NotNil(t, comp.P2pSingleSigner()) + require.NotNil(t, comp.TxSingleSigner()) + require.NotNil(t, comp.BlockSigner()) + container := comp.MultiSignerContainer() + require.NotNil(t, container) + require.Nil(t, comp.SetMultiSignerContainer(nil)) + require.Nil(t, comp.MultiSignerContainer()) + require.Nil(t, comp.SetMultiSignerContainer(container)) + signer, err := comp.GetMultiSigner(0) + require.NoError(t, err) + require.NotNil(t, signer) + require.NotNil(t, comp.PeerSignatureHandler()) + require.NotNil(t, comp.BlockSignKeyGen()) + require.NotNil(t, comp.TxSignKeyGen()) + require.NotNil(t, comp.P2pKeyGen()) + require.NotNil(t, comp.MessageSignVerifier()) + require.NotNil(t, comp.ConsensusSigningHandler()) + require.NotNil(t, comp.ManagedPeersHolder()) + require.NotNil(t, comp.KeysHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} + +func TestCryptoComponentsHolder_Clone(t *testing.T) { + t.Parallel() + + comp, err := CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.NoError(t, err) + + compClone := comp.Clone() + require.Equal(t, comp, compClone) + require.False(t, comp == compClone) // pointer testing + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/dataComponents.go b/node/chainSimulator/components/dataComponents.go new file mode 100644 index 00000000000..8f04c351509 --- /dev/null +++ b/node/chainSimulator/components/dataComponents.go @@ -0,0 +1,124 @@ +package components + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/provider" + "github.com/multiversx/mx-chain-go/factory" +) + +// ArgsDataComponentsHolder will hold the components needed for data components +type ArgsDataComponentsHolder struct { + Chain data.ChainHandler + StorageService dataRetriever.StorageService + DataPool dataRetriever.PoolsHolder + InternalMarshaller marshal.Marshalizer +} + +type dataComponentsHolder struct { + closeHandler *closeHandler + chain data.ChainHandler + storageService dataRetriever.StorageService + dataPool dataRetriever.PoolsHolder + miniBlockProvider factory.MiniBlockProvider +} + +// 
CreateDataComponents will create the data components holder +func CreateDataComponents(args ArgsDataComponentsHolder) (*dataComponentsHolder, error) { + miniBlockStorer, err := args.StorageService.GetStorer(dataRetriever.MiniBlockUnit) + if err != nil { + return nil, err + } + + arg := provider.ArgMiniBlockProvider{ + MiniBlockPool: args.DataPool.MiniBlocks(), + MiniBlockStorage: miniBlockStorer, + Marshalizer: args.InternalMarshaller, + } + + miniBlocksProvider, err := provider.NewMiniBlockProvider(arg) + if err != nil { + return nil, err + } + + instance := &dataComponentsHolder{ + closeHandler: NewCloseHandler(), + chain: args.Chain, + storageService: args.StorageService, + dataPool: args.DataPool, + miniBlockProvider: miniBlocksProvider, + } + + instance.collectClosableComponents() + + return instance, nil +} + +// Blockchain will return the blockchain handler +func (d *dataComponentsHolder) Blockchain() data.ChainHandler { + return d.chain +} + +// SetBlockchain will set the blockchain handler +func (d *dataComponentsHolder) SetBlockchain(chain data.ChainHandler) error { + d.chain = chain + + return nil +} + +// StorageService will return the storage service +func (d *dataComponentsHolder) StorageService() dataRetriever.StorageService { + return d.storageService +} + +// Datapool will return the data pool +func (d *dataComponentsHolder) Datapool() dataRetriever.PoolsHolder { + return d.dataPool +} + +// MiniBlocksProvider will return the mini blocks provider +func (d *dataComponentsHolder) MiniBlocksProvider() factory.MiniBlockProvider { + return d.miniBlockProvider +} + +// Clone will clone the data components holder +func (d *dataComponentsHolder) Clone() interface{} { + return &dataComponentsHolder{ + chain: d.chain, + storageService: d.storageService, + dataPool: d.dataPool, + miniBlockProvider: d.miniBlockProvider, + closeHandler: d.closeHandler, + } +} + +func (d *dataComponentsHolder) collectClosableComponents() { + d.closeHandler.AddComponent(d.storageService) + d.closeHandler.AddComponent(d.dataPool) +} + +// Close will call the Close methods on all inner components +func (d *dataComponentsHolder) Close() error { + return d.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (d *dataComponentsHolder) IsInterfaceNil() bool { + return d == nil +} + +// Create will do nothing +func (d *dataComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (d *dataComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (d *dataComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/dataComponents_test.go b/node/chainSimulator/components/dataComponents_test.go new file mode 100644 index 00000000000..a74f0b751f6 --- /dev/null +++ b/node/chainSimulator/components/dataComponents_test.go @@ -0,0 +1,110 @@ +package components + +import ( + "testing" + + retriever "github.com/multiversx/mx-chain-go/dataRetriever" + chainStorage "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/stretchr/testify/require" +) + +func createArgsDataComponentsHolder() ArgsDataComponentsHolder { + return ArgsDataComponentsHolder{ + Chain: &testscommon.ChainHandlerStub{}, 
+ StorageService: &storage.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (chainStorage.Storer, error) { + return &storage.StorerStub{}, nil + }, + }, + DataPool: &dataRetriever.PoolsHolderStub{ + MiniBlocksCalled: func() chainStorage.Cacher { + return &testscommon.CacherStub{} + }, + }, + InternalMarshaller: &testscommon.MarshallerStub{}, + } +} + +func TestCreateDataComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateDataComponents(createArgsDataComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewMiniBlockProvider failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsDataComponentsHolder() + args.DataPool = &dataRetriever.PoolsHolderStub{ + MiniBlocksCalled: func() chainStorage.Cacher { + return nil + }, + } + comp, err := CreateDataComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("GetStorer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsDataComponentsHolder() + args.StorageService = &storage.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (chainStorage.Storer, error) { + return nil, expectedErr + }, + } + comp, err := CreateDataComponents(args) + require.Equal(t, expectedErr, err) + require.Nil(t, comp) + }) +} + +func TestDataComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *dataComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateDataComponents(createArgsDataComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestDataComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateDataComponents(createArgsDataComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.Blockchain()) + require.Nil(t, comp.SetBlockchain(nil)) + require.Nil(t, comp.Blockchain()) + require.NotNil(t, comp.StorageService()) + require.NotNil(t, comp.Datapool()) + require.NotNil(t, comp.MiniBlocksProvider()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} + +func TestDataComponentsHolder_Clone(t *testing.T) { + t.Parallel() + + comp, err := CreateDataComponents(createArgsDataComponentsHolder()) + require.NoError(t, err) + + compClone := comp.Clone() + require.Equal(t, comp, compClone) + require.False(t, comp == compClone) // pointer testing + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/instantBroadcastMessenger.go b/node/chainSimulator/components/instantBroadcastMessenger.go new file mode 100644 index 00000000000..893fc4edbc7 --- /dev/null +++ b/node/chainSimulator/components/instantBroadcastMessenger.go @@ -0,0 +1,106 @@ +package components + +import ( + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/sharding" +) + +type instantBroadcastMessenger struct { + consensus.BroadcastMessenger + shardCoordinator sharding.Coordinator +} + +// NewInstantBroadcastMessenger creates a new 
instance of type instantBroadcastMessenger +func NewInstantBroadcastMessenger(broadcastMessenger consensus.BroadcastMessenger, shardCoordinator sharding.Coordinator) (*instantBroadcastMessenger, error) { + if check.IfNil(broadcastMessenger) { + return nil, errors.ErrNilBroadcastMessenger + } + if check.IfNil(shardCoordinator) { + return nil, errors.ErrNilShardCoordinator + } + + return &instantBroadcastMessenger{ + BroadcastMessenger: broadcastMessenger, + shardCoordinator: shardCoordinator, + }, nil +} + +// BroadcastBlockDataLeader broadcasts the block data as consensus group leader +func (messenger *instantBroadcastMessenger) BroadcastBlockDataLeader(_ data.HeaderHandler, miniBlocks map[uint32][]byte, transactions map[string][][]byte, pkBytes []byte) error { + if messenger.shardCoordinator.SelfId() == common.MetachainShardId { + return messenger.broadcastMiniblockData(miniBlocks, transactions, pkBytes) + } + + return messenger.broadcastBlockDataLeaderWhenShard(miniBlocks, transactions, pkBytes) +} + +func (messenger *instantBroadcastMessenger) broadcastBlockDataLeaderWhenShard(miniBlocks map[uint32][]byte, transactions map[string][][]byte, pkBytes []byte) error { + if len(miniBlocks) == 0 { + return nil + } + + metaMiniBlocks, metaTransactions := messenger.extractMetaMiniBlocksAndTransactions(miniBlocks, transactions) + + return messenger.broadcastMiniblockData(metaMiniBlocks, metaTransactions, pkBytes) +} + +func (messenger *instantBroadcastMessenger) broadcastMiniblockData(miniBlocks map[uint32][]byte, transactions map[string][][]byte, pkBytes []byte) error { + if len(miniBlocks) > 0 { + err := messenger.BroadcastMiniBlocks(miniBlocks, pkBytes) + if err != nil { + log.Warn("instantBroadcastMessenger.BroadcastBlockData: broadcast miniblocks", "error", err.Error()) + } + } + + if len(transactions) > 0 { + err := messenger.BroadcastTransactions(transactions, pkBytes) + if err != nil { + log.Warn("instantBroadcastMessenger.BroadcastBlockData: broadcast transactions", "error", err.Error()) + } + } + + return nil +} + +func (messenger *instantBroadcastMessenger) extractMetaMiniBlocksAndTransactions( + miniBlocks map[uint32][]byte, + transactions map[string][][]byte, +) (map[uint32][]byte, map[string][][]byte) { + + metaMiniBlocks := make(map[uint32][]byte) + metaTransactions := make(map[string][][]byte) + + for shardID, mbsMarshalized := range miniBlocks { + if shardID != core.MetachainShardId { + continue + } + + metaMiniBlocks[shardID] = mbsMarshalized + delete(miniBlocks, shardID) + } + + identifier := messenger.shardCoordinator.CommunicationIdentifier(core.MetachainShardId) + + for broadcastTopic, txsMarshalized := range transactions { + if !strings.Contains(broadcastTopic, identifier) { + continue + } + + metaTransactions[broadcastTopic] = txsMarshalized + delete(transactions, broadcastTopic) + } + + return metaMiniBlocks, metaTransactions +} + +// IsInterfaceNil returns true if there is no value under the interface +func (messenger *instantBroadcastMessenger) IsInterfaceNil() bool { + return messenger == nil +} diff --git a/node/chainSimulator/components/instantBroadcastMessenger_test.go b/node/chainSimulator/components/instantBroadcastMessenger_test.go new file mode 100644 index 00000000000..361caa03bbc --- /dev/null +++ b/node/chainSimulator/components/instantBroadcastMessenger_test.go @@ -0,0 +1,134 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus/mock" + 
errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/stretchr/testify/require" +) + +func TestNewInstantBroadcastMessenger(t *testing.T) { + t.Parallel() + + t.Run("nil broadcastMessenger should error", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(nil, nil) + require.Equal(t, errorsMx.ErrNilBroadcastMessenger, err) + require.Nil(t, mes) + }) + t.Run("nil shardCoordinator should error", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{}, nil) + require.Equal(t, errorsMx.ErrNilShardCoordinator, err) + require.Nil(t, mes) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{}, &mock.ShardCoordinatorMock{}) + require.NoError(t, err) + require.NotNil(t, mes) + }) +} + +func TestInstantBroadcastMessenger_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var mes *instantBroadcastMessenger + require.True(t, mes.IsInterfaceNil()) + + mes, _ = NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{}, &mock.ShardCoordinatorMock{}) + require.False(t, mes.IsInterfaceNil()) +} + +func TestInstantBroadcastMessenger_BroadcastBlockDataLeader(t *testing.T) { + t.Parallel() + + t.Run("meta should work", func(t *testing.T) { + t.Parallel() + + providedMBs := map[uint32][]byte{ + 0: []byte("mb shard 0"), + 1: []byte("mb shard 1"), + common.MetachainShardId: []byte("mb shard meta"), + } + providedTxs := map[string][][]byte{ + "topic_0": {[]byte("txs topic 0")}, + "topic_1": {[]byte("txs topic 1")}, + } + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{ + BroadcastMiniBlocksCalled: func(mbs map[uint32][]byte, bytes []byte) error { + require.Equal(t, providedMBs, mbs) + return expectedErr // for coverage only + }, + BroadcastTransactionsCalled: func(txs map[string][][]byte, bytes []byte) error { + require.Equal(t, providedTxs, txs) + return expectedErr // for coverage only + }, + }, &mock.ShardCoordinatorMock{ + ShardID: common.MetachainShardId, + }) + require.NoError(t, err) + + err = mes.BroadcastBlockDataLeader(nil, providedMBs, providedTxs, []byte("pk")) + require.NoError(t, err) + }) + t.Run("shard should work", func(t *testing.T) { + t.Parallel() + + providedMBs := map[uint32][]byte{ + 0: []byte("mb shard 0"), // for coverage only + common.MetachainShardId: []byte("mb shard meta"), + } + expectedMBs := map[uint32][]byte{ + common.MetachainShardId: []byte("mb shard meta"), + } + providedTxs := map[string][][]byte{ + "topic_0": {[]byte("txs topic 1")}, // for coverage only + "topic_0_META": {[]byte("txs topic meta")}, + } + expectedTxs := map[string][][]byte{ + "topic_0_META": {[]byte("txs topic meta")}, + } + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{ + BroadcastMiniBlocksCalled: func(mbs map[uint32][]byte, bytes []byte) error { + require.Equal(t, expectedMBs, mbs) + return nil + }, + BroadcastTransactionsCalled: func(txs map[string][][]byte, bytes []byte) error { + require.Equal(t, expectedTxs, txs) + return nil + }, + }, &mock.ShardCoordinatorMock{ + ShardID: 0, + }) + require.NoError(t, err) + + err = mes.BroadcastBlockDataLeader(nil, providedMBs, providedTxs, []byte("pk")) + require.NoError(t, err) + }) + t.Run("shard, empty miniblocks should early exit", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{ + BroadcastMiniBlocksCalled: func(mbs map[uint32][]byte, bytes 
[]byte) error { + require.Fail(t, "should have not been called") + return nil + }, + BroadcastTransactionsCalled: func(txs map[string][][]byte, bytes []byte) error { + require.Fail(t, "should have not been called") + return nil + }, + }, &mock.ShardCoordinatorMock{ + ShardID: 0, + }) + require.NoError(t, err) + + err = mes.BroadcastBlockDataLeader(nil, nil, nil, []byte("pk")) + require.NoError(t, err) + }) +} diff --git a/node/chainSimulator/components/interface.go b/node/chainSimulator/components/interface.go new file mode 100644 index 00000000000..6456c1e2b32 --- /dev/null +++ b/node/chainSimulator/components/interface.go @@ -0,0 +1,24 @@ +package components + +import "github.com/multiversx/mx-chain-core-go/core" + +// SyncedBroadcastNetworkHandler defines the synced network interface +type SyncedBroadcastNetworkHandler interface { + RegisterMessageReceiver(handler messageReceiver, pid core.PeerID) + Broadcast(pid core.PeerID, topic string, buff []byte) + SendDirectly(from core.PeerID, topic string, buff []byte, to core.PeerID) error + GetConnectedPeers() []core.PeerID + GetConnectedPeersOnTopic(topic string) []core.PeerID + IsInterfaceNil() bool +} + +// APIConfigurator defines what an api configurator should be able to do +type APIConfigurator interface { + RestApiInterface(shardID uint32) string +} + +// NetworkMessenger defines what a network messenger should do +type NetworkMessenger interface { + Broadcast(topic string, buff []byte) + IsInterfaceNil() bool +} diff --git a/node/chainSimulator/components/manualRoundHandler.go b/node/chainSimulator/components/manualRoundHandler.go new file mode 100644 index 00000000000..479cf63a1f5 --- /dev/null +++ b/node/chainSimulator/components/manualRoundHandler.go @@ -0,0 +1,66 @@ +package components + +import ( + "sync/atomic" + "time" +) + +type manualRoundHandler struct { + index int64 + genesisTimeStamp int64 + roundDuration time.Duration + initialRound int64 +} + +// NewManualRoundHandler returns a manual round handler instance +func NewManualRoundHandler(genesisTimeStamp int64, roundDuration time.Duration, initialRound int64) *manualRoundHandler { + return &manualRoundHandler{ + genesisTimeStamp: genesisTimeStamp, + roundDuration: roundDuration, + index: initialRound, + initialRound: initialRound, + } +} + +// IncrementIndex will increment the current round index +func (handler *manualRoundHandler) IncrementIndex() { + atomic.AddInt64(&handler.index, 1) +} + +// Index returns the current index +func (handler *manualRoundHandler) Index() int64 { + return atomic.LoadInt64(&handler.index) +} + +// BeforeGenesis returns false +func (handler *manualRoundHandler) BeforeGenesis() bool { + return false +} + +// UpdateRound does nothing as this implementation does not work with real timers +func (handler *manualRoundHandler) UpdateRound(_ time.Time, _ time.Time) { +} + +// TimeStamp returns the time based of the genesis timestamp and the current round +func (handler *manualRoundHandler) TimeStamp() time.Time { + rounds := atomic.LoadInt64(&handler.index) + timeFromGenesis := handler.roundDuration * time.Duration(rounds) + timestamp := time.Unix(handler.genesisTimeStamp, 0).Add(timeFromGenesis) + timestamp = time.Unix(timestamp.Unix()-int64(handler.roundDuration.Seconds())*handler.initialRound, 0) + return timestamp +} + +// TimeDuration returns the provided time duration for this instance +func (handler *manualRoundHandler) TimeDuration() time.Duration { + return handler.roundDuration +} + +// RemainingTime returns the max time as 
the start time is not taken into account +func (handler *manualRoundHandler) RemainingTime(_ time.Time, maxTime time.Duration) time.Duration { + return maxTime +} + +// IsInterfaceNil returns true if there is no value under the interface +func (handler *manualRoundHandler) IsInterfaceNil() bool { + return handler == nil +} diff --git a/node/chainSimulator/components/manualRoundHandler_test.go b/node/chainSimulator/components/manualRoundHandler_test.go new file mode 100644 index 00000000000..8a866d6ccec --- /dev/null +++ b/node/chainSimulator/components/manualRoundHandler_test.go @@ -0,0 +1,44 @@ +package components + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestNewManualRoundHandler(t *testing.T) { + t.Parallel() + + handler := NewManualRoundHandler(100, time.Second, 0) + require.NotNil(t, handler) +} + +func TestManualRoundHandler_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var handler *manualRoundHandler + require.True(t, handler.IsInterfaceNil()) + + handler = NewManualRoundHandler(100, time.Second, 0) + require.False(t, handler.IsInterfaceNil()) +} + +func TestManualRoundHandler_Operations(t *testing.T) { + t.Parallel() + + genesisTime := time.Now() + providedIndex := int64(0) + providedRoundDuration := time.Second + handler := NewManualRoundHandler(genesisTime.Unix(), providedRoundDuration, providedIndex) + require.Equal(t, providedIndex, handler.Index()) + handler.IncrementIndex() + require.Equal(t, providedIndex+1, handler.Index()) + expectedTimestamp := time.Unix(handler.genesisTimeStamp, 0).Add(providedRoundDuration) + require.Equal(t, expectedTimestamp, handler.TimeStamp()) + require.Equal(t, providedRoundDuration, handler.TimeDuration()) + providedMaxTime := time.Minute + require.Equal(t, providedMaxTime, handler.RemainingTime(time.Now(), providedMaxTime)) + require.False(t, handler.BeforeGenesis()) + handler.UpdateRound(time.Now(), time.Now()) // for coverage only +} diff --git a/node/chainSimulator/components/memoryComponents.go b/node/chainSimulator/components/memoryComponents.go new file mode 100644 index 00000000000..3b12e720756 --- /dev/null +++ b/node/chainSimulator/components/memoryComponents.go @@ -0,0 +1,82 @@ +package components + +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/storage/database" + "github.com/multiversx/mx-chain-go/storage/storageunit" +) + +// CreateMemUnit creates a new in-memory storage unit +func CreateMemUnit() storage.Storer { + capacity := uint32(10) + shards := uint32(1) + sizeInBytes := uint64(0) + cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) + persist, _ := database.NewlruDB(100000) + unit, _ := storageunit.NewStorageUnit(cache, persist) + + return unit +} + +type trieStorage struct { + storage.Storer +} + +// CreateMemUnitForTries returns a special type of storer used on tries instances +func CreateMemUnitForTries() storage.Storer { + return &trieStorage{ + Storer: CreateMemUnit(), + } +} + +// SetEpochForPutOperation does nothing +func (store *trieStorage) SetEpochForPutOperation(_ uint32) { +} + +// GetFromOldEpochsWithoutAddingToCache tries to get directly the key +func (store *trieStorage) GetFromOldEpochsWithoutAddingToCache(key []byte) ([]byte, core.OptionalUint32, error) { + value, err := store.Get(key) + + 
return value, core.OptionalUint32{}, err +} + +// GetFromLastEpoch tries to get directly the key +func (store *trieStorage) GetFromLastEpoch(key []byte) ([]byte, error) { + return store.Get(key) +} + +// PutInEpoch will put the key directly +func (store *trieStorage) PutInEpoch(key []byte, data []byte, _ uint32) error { + return store.Put(key, data) +} + +// PutInEpochWithoutCache will put the key directly +func (store *trieStorage) PutInEpochWithoutCache(key []byte, data []byte, _ uint32) error { + return store.Put(key, data) +} + +// GetLatestStorageEpoch returns 0 +func (store *trieStorage) GetLatestStorageEpoch() (uint32, error) { + return 0, nil +} + +// GetFromCurrentEpoch tries to get directly the key +func (store *trieStorage) GetFromCurrentEpoch(key []byte) ([]byte, error) { + return store.Get(key) +} + +// GetFromEpoch tries to get directly the key +func (store *trieStorage) GetFromEpoch(key []byte, _ uint32) ([]byte, error) { + return store.Get(key) +} + +// RemoveFromCurrentEpoch removes directly the key +func (store *trieStorage) RemoveFromCurrentEpoch(key []byte) error { + return store.Remove(key) +} + +// RemoveFromAllActiveEpochs removes directly the key +func (store *trieStorage) RemoveFromAllActiveEpochs(key []byte) error { + return store.Remove(key) +} diff --git a/node/chainSimulator/components/memoryComponents_test.go b/node/chainSimulator/components/memoryComponents_test.go new file mode 100644 index 00000000000..b393bca7d47 --- /dev/null +++ b/node/chainSimulator/components/memoryComponents_test.go @@ -0,0 +1,55 @@ +package components + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCreateMemUnitForTries(t *testing.T) { + t.Parallel() + + memUnitStorer := CreateMemUnitForTries() + require.NotNil(t, memUnitStorer) + + memUnit, ok := memUnitStorer.(*trieStorage) + require.True(t, ok) + memUnit.SetEpochForPutOperation(0) // for coverage only + key := []byte("key") + data := []byte("data") + require.NoError(t, memUnit.Put(key, data)) + + require.NoError(t, memUnit.PutInEpoch(key, data, 0)) + require.NoError(t, memUnit.PutInEpochWithoutCache(key, data, 0)) + + value, _, err := memUnit.GetFromOldEpochsWithoutAddingToCache(key) + require.NoError(t, err) + require.Equal(t, data, value) + + latest, err := memUnit.GetLatestStorageEpoch() + require.NoError(t, err) + require.Zero(t, latest) + + value, err = memUnit.GetFromCurrentEpoch(key) + require.NoError(t, err) + require.Equal(t, data, value) + + value, err = memUnit.GetFromEpoch(key, 0) + require.NoError(t, err) + require.Equal(t, data, value) + + value, err = memUnit.GetFromLastEpoch(key) + require.NoError(t, err) + require.Equal(t, data, value) + + require.NoError(t, memUnit.RemoveFromCurrentEpoch(key)) + value, err = memUnit.GetFromCurrentEpoch(key) + require.Error(t, err) + require.Empty(t, value) + + require.NoError(t, memUnit.PutInEpoch(key, data, 0)) + require.NoError(t, memUnit.RemoveFromAllActiveEpochs(key)) + value, err = memUnit.GetFromCurrentEpoch(key) + require.Error(t, err) + require.Empty(t, value) +} diff --git a/node/chainSimulator/components/networkComponents.go b/node/chainSimulator/components/networkComponents.go new file mode 100644 index 00000000000..6b791f6927b --- /dev/null +++ b/node/chainSimulator/components/networkComponents.go @@ -0,0 +1,142 @@ +package components + +import ( + disabledBootstrap "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" + "github.com/multiversx/mx-chain-go/factory" + 
disabledFactory "github.com/multiversx/mx-chain-go/factory/disabled" + "github.com/multiversx/mx-chain-go/node/chainSimulator/disabled" + "github.com/multiversx/mx-chain-go/p2p" + disabledP2P "github.com/multiversx/mx-chain-go/p2p/disabled" + "github.com/multiversx/mx-chain-go/process" + disabledAntiflood "github.com/multiversx/mx-chain-go/process/throttle/antiflood/disabled" +) + +type networkComponentsHolder struct { + closeHandler *closeHandler + networkMessenger p2p.Messenger + inputAntiFloodHandler factory.P2PAntifloodHandler + outputAntiFloodHandler factory.P2PAntifloodHandler + pubKeyCacher process.TimeCacher + peerBlackListHandler process.PeerBlackListCacher + peerHonestyHandler factory.PeerHonestyHandler + preferredPeersHolderHandler factory.PreferredPeersHolderHandler + peersRatingHandler p2p.PeersRatingHandler + peersRatingMonitor p2p.PeersRatingMonitor + fullArchiveNetworkMessenger p2p.Messenger + fullArchivePreferredPeersHolderHandler factory.PreferredPeersHolderHandler +} + +// CreateNetworkComponents creates a new networkComponentsHolder instance +func CreateNetworkComponents(network SyncedBroadcastNetworkHandler) (*networkComponentsHolder, error) { + messenger, err := NewSyncedMessenger(network) + if err != nil { + return nil, err + } + + instance := &networkComponentsHolder{ + closeHandler: NewCloseHandler(), + networkMessenger: messenger, + inputAntiFloodHandler: disabled.NewAntiFlooder(), + outputAntiFloodHandler: disabled.NewAntiFlooder(), + pubKeyCacher: &disabledAntiflood.TimeCache{}, + peerBlackListHandler: &disabledAntiflood.PeerBlacklistCacher{}, + peerHonestyHandler: disabled.NewPeerHonesty(), + preferredPeersHolderHandler: disabledFactory.NewPreferredPeersHolder(), + peersRatingHandler: disabledBootstrap.NewDisabledPeersRatingHandler(), + peersRatingMonitor: disabled.NewPeersRatingMonitor(), + fullArchiveNetworkMessenger: disabledP2P.NewNetworkMessenger(), + fullArchivePreferredPeersHolderHandler: disabledFactory.NewPreferredPeersHolder(), + } + + instance.collectClosableComponents() + + return instance, nil +} + +// NetworkMessenger returns the network messenger +func (holder *networkComponentsHolder) NetworkMessenger() p2p.Messenger { + return holder.networkMessenger +} + +// InputAntiFloodHandler returns the input antiflooder +func (holder *networkComponentsHolder) InputAntiFloodHandler() factory.P2PAntifloodHandler { + return holder.inputAntiFloodHandler +} + +// OutputAntiFloodHandler returns the output antiflooder +func (holder *networkComponentsHolder) OutputAntiFloodHandler() factory.P2PAntifloodHandler { + return holder.outputAntiFloodHandler +} + +// PubKeyCacher returns the public key cacher +func (holder *networkComponentsHolder) PubKeyCacher() process.TimeCacher { + return holder.pubKeyCacher +} + +// PeerBlackListHandler returns the peer blacklist handler +func (holder *networkComponentsHolder) PeerBlackListHandler() process.PeerBlackListCacher { + return holder.peerBlackListHandler +} + +// PeerHonestyHandler returns the peer honesty handler +func (holder *networkComponentsHolder) PeerHonestyHandler() factory.PeerHonestyHandler { + return holder.peerHonestyHandler +} + +// PreferredPeersHolderHandler returns the preferred peers holder +func (holder *networkComponentsHolder) PreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + return holder.preferredPeersHolderHandler +} + +// PeersRatingHandler returns the peers rating handler 
+func (holder *networkComponentsHolder) PeersRatingHandler() p2p.PeersRatingHandler { + return holder.peersRatingHandler +} + +// PeersRatingMonitor returns the peers rating monitor +func (holder *networkComponentsHolder) PeersRatingMonitor() p2p.PeersRatingMonitor { + return holder.peersRatingMonitor +} + +// FullArchiveNetworkMessenger returns the full archive network messenger +func (holder *networkComponentsHolder) FullArchiveNetworkMessenger() p2p.Messenger { + return holder.fullArchiveNetworkMessenger +} + +// FullArchivePreferredPeersHolderHandler returns the full archive preferred peers holder +func (holder *networkComponentsHolder) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + return holder.fullArchivePreferredPeersHolderHandler +} + +func (holder *networkComponentsHolder) collectClosableComponents() { + holder.closeHandler.AddComponent(holder.networkMessenger) + holder.closeHandler.AddComponent(holder.inputAntiFloodHandler) + holder.closeHandler.AddComponent(holder.outputAntiFloodHandler) + holder.closeHandler.AddComponent(holder.peerHonestyHandler) + holder.closeHandler.AddComponent(holder.fullArchiveNetworkMessenger) +} + +// Close will call the Close methods on all inner components +func (holder *networkComponentsHolder) Close() error { + return holder.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (holder *networkComponentsHolder) IsInterfaceNil() bool { + return holder == nil +} + +// Create will do nothing +func (holder *networkComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (holder *networkComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (holder *networkComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/networkComponents_test.go b/node/chainSimulator/components/networkComponents_test.go new file mode 100644 index 00000000000..9c184d4d608 --- /dev/null +++ b/node/chainSimulator/components/networkComponents_test.go @@ -0,0 +1,62 @@ +package components + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCreateNetworkComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateNetworkComponents(NewSyncedBroadcastNetwork()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("nil network should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateNetworkComponents(nil) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestNetworkComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *networkComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateNetworkComponents(NewSyncedBroadcastNetwork()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestNetworkComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateNetworkComponents(NewSyncedBroadcastNetwork()) + require.NoError(t, err) + + require.NotNil(t, comp.NetworkMessenger()) + require.NotNil(t, comp.InputAntiFloodHandler()) + require.NotNil(t, comp.OutputAntiFloodHandler()) + require.NotNil(t, comp.PubKeyCacher()) + require.NotNil(t, comp.PeerBlackListHandler()) + require.NotNil(t, comp.PeerHonestyHandler()) + require.NotNil(t, comp.PreferredPeersHolderHandler()) + require.NotNil(t, 
comp.PeersRatingHandler()) + require.NotNil(t, comp.PeersRatingMonitor()) + require.NotNil(t, comp.FullArchiveNetworkMessenger()) + require.NotNil(t, comp.FullArchivePreferredPeersHolderHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/nodeFacade.go b/node/chainSimulator/components/nodeFacade.go new file mode 100644 index 00000000000..d62814fdf03 --- /dev/null +++ b/node/chainSimulator/components/nodeFacade.go @@ -0,0 +1,190 @@ +package components + +import ( + "errors" + "fmt" + "strconv" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/api/gin" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/facade" + apiComp "github.com/multiversx/mx-chain-go/factory/api" + nodePack "github.com/multiversx/mx-chain-go/node" + "github.com/multiversx/mx-chain-go/node/metrics" + "github.com/multiversx/mx-chain-go/process/mock" +) + +func (node *testOnlyProcessingNode) createFacade(configs config.Configs, apiInterface APIConfigurator, vmQueryDelayAfterStartInMs uint64) error { + log.Debug("creating api resolver structure") + + err := node.createMetrics(configs) + if err != nil { + return err + } + + argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ + GasScheduleConfig: configs.EpochConfig.GasSchedule, + ConfigDir: configs.ConfigurationPathsHolder.GasScheduleDirectoryName, + EpochNotifier: node.CoreComponentsHolder.EpochNotifier(), + WasmVMChangeLocker: node.CoreComponentsHolder.WasmVMChangeLocker(), + } + gasScheduleNotifier, err := forking.NewGasScheduleNotifier(argsGasScheduleNotifier) + if err != nil { + return err + } + + allowVMQueriesChan := make(chan struct{}) + go func() { + time.Sleep(time.Duration(vmQueryDelayAfterStartInMs) * time.Millisecond) + close(allowVMQueriesChan) + node.StatusCoreComponents.AppStatusHandler().SetStringValue(common.MetricAreVMQueriesReady, strconv.FormatBool(true)) + }() + + apiResolverArgs := &apiComp.ApiResolverArgs{ + Configs: &configs, + CoreComponents: node.CoreComponentsHolder, + DataComponents: node.DataComponentsHolder, + StateComponents: node.StateComponentsHolder, + BootstrapComponents: node.BootstrapComponentsHolder, + CryptoComponents: node.CryptoComponentsHolder, + ProcessComponents: node.ProcessComponentsHolder, + StatusCoreComponents: node.StatusCoreComponents, + GasScheduleNotifier: gasScheduleNotifier, + Bootstrapper: &mock.BootstrapperStub{ + GetNodeStateCalled: func() common.NodeState { + return common.NsSynchronized + }, + }, + AllowVMQueriesChan: allowVMQueriesChan, + StatusComponents: node.StatusComponentsHolder, + ProcessingMode: common.GetNodeProcessingMode(configs.ImportDbConfig), + } + + apiResolver, err := apiComp.CreateApiResolver(apiResolverArgs) + if err != nil { + return err + } + + log.Debug("creating multiversx node facade") + + flagsConfig := configs.FlagsConfig + + nd, err := nodePack.NewNode( + nodePack.WithStatusCoreComponents(node.StatusCoreComponents), + nodePack.WithCoreComponents(node.CoreComponentsHolder), + nodePack.WithCryptoComponents(node.CryptoComponentsHolder), + nodePack.WithBootstrapComponents(node.BootstrapComponentsHolder), + 
nodePack.WithStateComponents(node.StateComponentsHolder), + nodePack.WithDataComponents(node.DataComponentsHolder), + nodePack.WithStatusComponents(node.StatusComponentsHolder), + nodePack.WithProcessComponents(node.ProcessComponentsHolder), + nodePack.WithNetworkComponents(node.NetworkComponentsHolder), + nodePack.WithInitialNodesPubKeys(node.CoreComponentsHolder.GenesisNodesSetup().InitialNodesPubKeys()), + nodePack.WithRoundDuration(node.CoreComponentsHolder.GenesisNodesSetup().GetRoundDuration()), + nodePack.WithConsensusGroupSize(int(node.CoreComponentsHolder.GenesisNodesSetup().GetShardConsensusGroupSize())), + nodePack.WithGenesisTime(node.CoreComponentsHolder.GenesisTime()), + nodePack.WithConsensusType(configs.GeneralConfig.Consensus.Type), + nodePack.WithRequestedItemsHandler(node.ProcessComponentsHolder.RequestedItemsHandler()), + nodePack.WithAddressSignatureSize(configs.GeneralConfig.AddressPubkeyConverter.SignatureLength), + nodePack.WithValidatorSignatureSize(configs.GeneralConfig.ValidatorPubkeyConverter.SignatureLength), + nodePack.WithPublicKeySize(configs.GeneralConfig.ValidatorPubkeyConverter.Length), + nodePack.WithNodeStopChannel(node.CoreComponentsHolder.ChanStopNodeProcess()), + nodePack.WithImportMode(configs.ImportDbConfig.IsImportDBMode), + nodePack.WithESDTNFTStorageHandler(node.ProcessComponentsHolder.ESDTDataStorageHandlerForAPI()), + ) + if err != nil { + return errors.New("error creating node: " + err.Error()) + } + + shardID := node.GetShardCoordinator().SelfId() + restApiInterface := apiInterface.RestApiInterface(shardID) + + argNodeFacade := facade.ArgNodeFacade{ + Node: nd, + ApiResolver: apiResolver, + RestAPIServerDebugMode: flagsConfig.EnableRestAPIServerDebugMode, + WsAntifloodConfig: configs.GeneralConfig.WebServerAntiflood, + FacadeConfig: config.FacadeConfig{ + RestApiInterface: restApiInterface, + PprofEnabled: flagsConfig.EnablePprof, + }, + ApiRoutesConfig: *configs.ApiRoutesConfig, + AccountsState: node.StateComponentsHolder.AccountsAdapter(), + PeerState: node.StateComponentsHolder.PeerAccounts(), + Blockchain: node.DataComponentsHolder.Blockchain(), + } + + ef, err := facade.NewNodeFacade(argNodeFacade) + if err != nil { + return fmt.Errorf("%w while creating NodeFacade", err) + } + + ef.SetSyncer(node.CoreComponentsHolder.SyncTimer()) + + node.facadeHandler = ef + + return nil +} + +func (node *testOnlyProcessingNode) createHttpServer(configs config.Configs) error { + httpServerArgs := gin.ArgsNewWebServer{ + Facade: node.facadeHandler, + ApiConfig: *configs.ApiRoutesConfig, + AntiFloodConfig: configs.GeneralConfig.WebServerAntiflood, + } + + httpServerWrapper, err := gin.NewGinWebServerHandler(httpServerArgs) + if err != nil { + return err + } + + err = httpServerWrapper.StartHttpServer() + if err != nil { + return err + } + + node.httpServer = httpServerWrapper + + return nil +} + +func (node *testOnlyProcessingNode) createMetrics(configs config.Configs) error { + err := metrics.InitMetrics( + node.StatusCoreComponents.AppStatusHandler(), + node.CryptoComponentsHolder.PublicKeyString(), + node.BootstrapComponentsHolder.NodeType(), + node.BootstrapComponentsHolder.ShardCoordinator(), + node.CoreComponentsHolder.GenesisNodesSetup(), + configs.FlagsConfig.Version, + configs.EconomicsConfig, + configs.GeneralConfig.EpochStartConfig.RoundsPerEpoch, + node.CoreComponentsHolder.MinTransactionVersion(), + ) + + if err != nil { + return err + } + + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricNodeDisplayName, 
configs.PreferencesConfig.Preferences.NodeDisplayName) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricRedundancyLevel, fmt.Sprintf("%d", configs.PreferencesConfig.Preferences.RedundancyLevel)) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricRedundancyIsMainActive, common.MetricValueNA) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricChainId, node.CoreComponentsHolder.ChainID()) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricGasPerDataByte, node.CoreComponentsHolder.EconomicsData().GasPerDataByte()) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricMinGasPrice, node.CoreComponentsHolder.EconomicsData().MinGasPrice()) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricMinGasLimit, node.CoreComponentsHolder.EconomicsData().MinGasLimit()) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricExtraGasLimitGuardedTx, node.CoreComponentsHolder.EconomicsData().ExtraGasLimitGuardedTx()) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricRewardsTopUpGradientPoint, node.CoreComponentsHolder.EconomicsData().RewardsTopUpGradientPoint().String()) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricTopUpFactor, fmt.Sprintf("%g", node.CoreComponentsHolder.EconomicsData().RewardsTopUpFactor())) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricGasPriceModifier, fmt.Sprintf("%g", node.CoreComponentsHolder.EconomicsData().GasPriceModifier())) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricMaxGasPerTransaction, node.CoreComponentsHolder.EconomicsData().MaxGasLimitPerTx()) + if configs.PreferencesConfig.Preferences.FullArchive { + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricPeerType, core.ObserverPeer.String()) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricPeerSubType, core.FullHistoryObserver.String()) + } + + return nil +} diff --git a/node/chainSimulator/components/processComponents.go b/node/chainSimulator/components/processComponents.go new file mode 100644 index 00000000000..6e00d776784 --- /dev/null +++ b/node/chainSimulator/components/processComponents.go @@ -0,0 +1,556 @@ +package components + +import ( + "fmt" + "io" + "math/big" + "path/filepath" + "time" + + "github.com/multiversx/mx-chain-core-go/core/partitioning" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/common/ordering" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dblookupext" + dbLookupFactory "github.com/multiversx/mx-chain-go/dblookupext/factory" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/factory" + processComp "github.com/multiversx/mx-chain-go/factory/processing" + "github.com/multiversx/mx-chain-go/genesis" + "github.com/multiversx/mx-chain-go/genesis/parsing" + 
"github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/interceptors" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage/cache" + "github.com/multiversx/mx-chain-go/update" + "github.com/multiversx/mx-chain-go/update/trigger" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +// ArgsProcessComponentsHolder will hold the components needed for process components +type ArgsProcessComponentsHolder struct { + CoreComponents factory.CoreComponentsHolder + CryptoComponents factory.CryptoComponentsHolder + NetworkComponents factory.NetworkComponentsHolder + BootstrapComponents factory.BootstrapComponentsHolder + StateComponents factory.StateComponentsHolder + DataComponents factory.DataComponentsHolder + StatusComponents factory.StatusComponentsHolder + StatusCoreComponents factory.StatusCoreComponentsHolder + NodesCoordinator nodesCoordinator.NodesCoordinator + + EpochConfig config.EpochConfig + RoundConfig config.RoundConfig + ConfigurationPathsHolder config.ConfigurationPathsHolder + FlagsConfig config.ContextFlagsConfig + ImportDBConfig config.ImportDbConfig + PrefsConfig config.Preferences + Config config.Config + EconomicsConfig config.EconomicsConfig + SystemSCConfig config.SystemSmartContractsConfig + + GenesisNonce uint64 + GenesisRound uint64 +} + +type processComponentsHolder struct { + receiptsRepository factory.ReceiptsRepository + nodesCoordinator nodesCoordinator.NodesCoordinator + shardCoordinator sharding.Coordinator + interceptorsContainer process.InterceptorsContainer + fullArchiveInterceptorsContainer process.InterceptorsContainer + resolversContainer dataRetriever.ResolversContainer + requestersFinder dataRetriever.RequestersFinder + roundHandler consensus.RoundHandler + epochStartTrigger epochStart.TriggerHandler + epochStartNotifier factory.EpochStartNotifier + forkDetector process.ForkDetector + blockProcessor process.BlockProcessor + blackListHandler process.TimeCacher + bootStorer process.BootStorer + headerSigVerifier process.InterceptedHeaderSigVerifier + headerIntegrityVerifier process.HeaderIntegrityVerifier + validatorsStatistics process.ValidatorStatisticsProcessor + validatorsProvider process.ValidatorsProvider + blockTracker process.BlockTracker + pendingMiniBlocksHandler process.PendingMiniBlocksHandler + requestHandler process.RequestHandler + txLogsProcessor process.TransactionLogProcessorDatabase + headerConstructionValidator process.HeaderConstructionValidator + peerShardMapper process.NetworkShardingCollector + fullArchivePeerShardMapper process.NetworkShardingCollector + fallbackHeaderValidator process.FallbackHeaderValidator + apiTransactionEvaluator factory.TransactionEvaluator + whiteListHandler process.WhiteListHandler + whiteListerVerifiedTxs process.WhiteListHandler + historyRepository dblookupext.HistoryRepository + importStartHandler update.ImportStartHandler + requestedItemsHandler dataRetriever.RequestedItemsHandler + nodeRedundancyHandler consensus.NodeRedundancyHandler + currentEpochProvider process.CurrentNetworkEpochProviderHandler + scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler + txsSenderHandler process.TxsSenderHandler + hardforkTrigger factory.HardforkTrigger + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker + 
esdtDataStorageHandlerForAPI vmcommon.ESDTNFTStorageHandler + accountsParser genesis.AccountsParser + sentSignatureTracker process.SentSignaturesTracker + epochStartSystemSCProcessor process.EpochStartSystemSCProcessor + relayedTxV3Processor process.RelayedTxV3Processor + managedProcessComponentsCloser io.Closer +} + +// CreateProcessComponents will create the process components holder +func CreateProcessComponents(args ArgsProcessComponentsHolder) (*processComponentsHolder, error) { + importStartHandler, err := trigger.NewImportStartHandler(filepath.Join(args.FlagsConfig.DbDir, common.DefaultDBPath), args.FlagsConfig.Version) + if err != nil { + return nil, err + } + totalSupply, ok := big.NewInt(0).SetString(args.EconomicsConfig.GlobalSettings.GenesisTotalSupply, 10) + if !ok { + return nil, fmt.Errorf("can not parse total suply from economics.toml, %s is not a valid value", + args.EconomicsConfig.GlobalSettings.GenesisTotalSupply) + } + + mintingSenderAddress := args.EconomicsConfig.GlobalSettings.GenesisMintingSenderAddress + argsAccountsParser := genesis.AccountsParserArgs{ + GenesisFilePath: args.ConfigurationPathsHolder.Genesis, + EntireSupply: totalSupply, + MinterAddress: mintingSenderAddress, + PubkeyConverter: args.CoreComponents.AddressPubKeyConverter(), + KeyGenerator: args.CryptoComponents.TxSignKeyGen(), + Hasher: args.CoreComponents.Hasher(), + Marshalizer: args.CoreComponents.InternalMarshalizer(), + } + + accountsParser, err := parsing.NewAccountsParser(argsAccountsParser) + if err != nil { + return nil, err + } + + smartContractParser, err := parsing.NewSmartContractsParser( + args.ConfigurationPathsHolder.SmartContracts, + args.CoreComponents.AddressPubKeyConverter(), + args.CryptoComponents.TxSignKeyGen(), + ) + if err != nil { + return nil, err + } + + historyRepoFactoryArgs := &dbLookupFactory.ArgsHistoryRepositoryFactory{ + SelfShardID: args.BootstrapComponents.ShardCoordinator().SelfId(), + Config: args.Config.DbLookupExtensions, + Hasher: args.CoreComponents.Hasher(), + Marshalizer: args.CoreComponents.InternalMarshalizer(), + Store: args.DataComponents.StorageService(), + Uint64ByteSliceConverter: args.CoreComponents.Uint64ByteSliceConverter(), + } + historyRepositoryFactory, err := dbLookupFactory.NewHistoryRepositoryFactory(historyRepoFactoryArgs) + if err != nil { + return nil, err + } + + lruCacheRequest, err := cache.NewLRUCache(int(args.Config.WhiteListPool.Capacity)) + if err != nil { + return nil, err + + } + whiteListHandler, err := interceptors.NewWhiteListDataVerifier(lruCacheRequest) + if err != nil { + return nil, err + } + + lruCacheTx, err := cache.NewLRUCache(int(args.Config.WhiteListerVerifiedTxs.Capacity)) + if err != nil { + return nil, err + + } + whiteListVerifiedTxs, err := interceptors.NewWhiteListDataVerifier(lruCacheTx) + if err != nil { + return nil, err + } + + historyRepository, err := historyRepositoryFactory.Create() + if err != nil { + return nil, err + } + + requestedItemsHandler := cache.NewTimeCache( + time.Duration(uint64(time.Millisecond) * args.CoreComponents.GenesisNodesSetup().GetRoundDuration())) + + txExecutionOrderHandler := ordering.NewOrderedCollection() + + argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ + GasScheduleConfig: args.EpochConfig.GasSchedule, + ConfigDir: args.ConfigurationPathsHolder.GasScheduleDirectoryName, + EpochNotifier: args.CoreComponents.EpochNotifier(), + WasmVMChangeLocker: args.CoreComponents.WasmVMChangeLocker(), + } + gasScheduleNotifier, err := 
forking.NewGasScheduleNotifier(argsGasScheduleNotifier) + if err != nil { + return nil, err + } + + processArgs := processComp.ProcessComponentsFactoryArgs{ + Config: args.Config, + EpochConfig: args.EpochConfig, + RoundConfig: args.RoundConfig, + PrefConfigs: args.PrefsConfig, + ImportDBConfig: args.ImportDBConfig, + EconomicsConfig: args.EconomicsConfig, + AccountsParser: accountsParser, + SmartContractParser: smartContractParser, + GasSchedule: gasScheduleNotifier, + NodesCoordinator: args.NodesCoordinator, + RequestedItemsHandler: requestedItemsHandler, + WhiteListHandler: whiteListHandler, + WhiteListerVerifiedTxs: whiteListVerifiedTxs, + MaxRating: 50, + SystemSCConfig: &args.SystemSCConfig, + ImportStartHandler: importStartHandler, + HistoryRepo: historyRepository, + FlagsConfig: args.FlagsConfig, + Data: args.DataComponents, + CoreData: args.CoreComponents, + Crypto: args.CryptoComponents, + State: args.StateComponents, + Network: args.NetworkComponents, + BootstrapComponents: args.BootstrapComponents, + StatusComponents: args.StatusComponents, + StatusCoreComponents: args.StatusCoreComponents, + TxExecutionOrderHandler: txExecutionOrderHandler, + GenesisNonce: args.GenesisNonce, + GenesisRound: args.GenesisRound, + } + processComponentsFactory, err := processComp.NewProcessComponentsFactory(processArgs) + if err != nil { + return nil, fmt.Errorf("NewProcessComponentsFactory failed: %w", err) + } + + managedProcessComponents, err := processComp.NewManagedProcessComponents(processComponentsFactory) + if err != nil { + return nil, err + } + + err = managedProcessComponents.Create() + if err != nil { + return nil, err + } + + instance := &processComponentsHolder{ + receiptsRepository: managedProcessComponents.ReceiptsRepository(), + nodesCoordinator: managedProcessComponents.NodesCoordinator(), + shardCoordinator: managedProcessComponents.ShardCoordinator(), + interceptorsContainer: managedProcessComponents.InterceptorsContainer(), + fullArchiveInterceptorsContainer: managedProcessComponents.FullArchiveInterceptorsContainer(), + resolversContainer: managedProcessComponents.ResolversContainer(), + requestersFinder: managedProcessComponents.RequestersFinder(), + roundHandler: managedProcessComponents.RoundHandler(), + epochStartTrigger: managedProcessComponents.EpochStartTrigger(), + epochStartNotifier: managedProcessComponents.EpochStartNotifier(), + forkDetector: managedProcessComponents.ForkDetector(), + blockProcessor: managedProcessComponents.BlockProcessor(), + blackListHandler: managedProcessComponents.BlackListHandler(), + bootStorer: managedProcessComponents.BootStorer(), + headerSigVerifier: managedProcessComponents.HeaderSigVerifier(), + headerIntegrityVerifier: managedProcessComponents.HeaderIntegrityVerifier(), + validatorsStatistics: managedProcessComponents.ValidatorsStatistics(), + validatorsProvider: managedProcessComponents.ValidatorsProvider(), + blockTracker: managedProcessComponents.BlockTracker(), + pendingMiniBlocksHandler: managedProcessComponents.PendingMiniBlocksHandler(), + requestHandler: managedProcessComponents.RequestHandler(), + txLogsProcessor: managedProcessComponents.TxLogsProcessor(), + headerConstructionValidator: managedProcessComponents.HeaderConstructionValidator(), + peerShardMapper: managedProcessComponents.PeerShardMapper(), + fullArchivePeerShardMapper: managedProcessComponents.FullArchivePeerShardMapper(), + fallbackHeaderValidator: managedProcessComponents.FallbackHeaderValidator(), + apiTransactionEvaluator: 
managedProcessComponents.APITransactionEvaluator(), + whiteListHandler: managedProcessComponents.WhiteListHandler(), + whiteListerVerifiedTxs: managedProcessComponents.WhiteListerVerifiedTxs(), + historyRepository: managedProcessComponents.HistoryRepository(), + importStartHandler: managedProcessComponents.ImportStartHandler(), + requestedItemsHandler: managedProcessComponents.RequestedItemsHandler(), + nodeRedundancyHandler: managedProcessComponents.NodeRedundancyHandler(), + currentEpochProvider: managedProcessComponents.CurrentEpochProvider(), + scheduledTxsExecutionHandler: managedProcessComponents.ScheduledTxsExecutionHandler(), + txsSenderHandler: managedProcessComponents.TxsSenderHandler(), // warning: this will be replaced + hardforkTrigger: managedProcessComponents.HardforkTrigger(), + processedMiniBlocksTracker: managedProcessComponents.ProcessedMiniBlocksTracker(), + esdtDataStorageHandlerForAPI: managedProcessComponents.ESDTDataStorageHandlerForAPI(), + accountsParser: managedProcessComponents.AccountsParser(), + sentSignatureTracker: managedProcessComponents.SentSignaturesTracker(), + epochStartSystemSCProcessor: managedProcessComponents.EpochSystemSCProcessor(), + relayedTxV3Processor: managedProcessComponents.RelayedTxV3Processor(), + managedProcessComponentsCloser: managedProcessComponents, + } + + return replaceWithCustomProcessSubComponents(instance, processArgs) +} + +func replaceWithCustomProcessSubComponents( + instance *processComponentsHolder, + processArgs processComp.ProcessComponentsFactoryArgs, +) (*processComponentsHolder, error) { + dataPacker, err := partitioning.NewSimpleDataPacker(processArgs.CoreData.InternalMarshalizer()) + if err != nil { + return nil, fmt.Errorf("%w in replaceWithCustomProcessSubComponents", err) + } + + argsSyncedTxsSender := ArgsSyncedTxsSender{ + Marshaller: processArgs.CoreData.InternalMarshalizer(), + ShardCoordinator: processArgs.BootstrapComponents.ShardCoordinator(), + NetworkMessenger: processArgs.Network.NetworkMessenger(), + DataPacker: dataPacker, + } + + instance.txsSenderHandler, err = NewSyncedTxsSender(argsSyncedTxsSender) + if err != nil { + return nil, fmt.Errorf("%w in replaceWithCustomProcessSubComponents", err) + } + + return instance, nil +} + +// SentSignaturesTracker will return the sent signature tracker +func (p *processComponentsHolder) SentSignaturesTracker() process.SentSignaturesTracker { + return p.sentSignatureTracker +} + +// NodesCoordinator will return the nodes coordinator +func (p *processComponentsHolder) NodesCoordinator() nodesCoordinator.NodesCoordinator { + return p.nodesCoordinator +} + +// ShardCoordinator will return the shard coordinator +func (p *processComponentsHolder) ShardCoordinator() sharding.Coordinator { + return p.shardCoordinator +} + +// InterceptorsContainer will return the interceptors container +func (p *processComponentsHolder) InterceptorsContainer() process.InterceptorsContainer { + return p.interceptorsContainer +} + +// FullArchiveInterceptorsContainer will return the full archive interceptor container +func (p *processComponentsHolder) FullArchiveInterceptorsContainer() process.InterceptorsContainer { + return p.fullArchiveInterceptorsContainer +} + +// ResolversContainer will return the resolvers container +func (p *processComponentsHolder) ResolversContainer() dataRetriever.ResolversContainer { + return p.resolversContainer +} + +// RequestersFinder will return the requesters finder +func (p *processComponentsHolder) RequestersFinder() dataRetriever.RequestersFinder { + 
return p.requestersFinder +} + +// RoundHandler will return the round handler +func (p *processComponentsHolder) RoundHandler() consensus.RoundHandler { + return p.roundHandler +} + +// EpochStartTrigger will return the epoch start trigger +func (p *processComponentsHolder) EpochStartTrigger() epochStart.TriggerHandler { + return p.epochStartTrigger +} + +// EpochStartNotifier will return the epoch start notifier +func (p *processComponentsHolder) EpochStartNotifier() factory.EpochStartNotifier { + return p.epochStartNotifier +} + +// ForkDetector will return the fork detector +func (p *processComponentsHolder) ForkDetector() process.ForkDetector { + return p.forkDetector +} + +// BlockProcessor will return the block processor +func (p *processComponentsHolder) BlockProcessor() process.BlockProcessor { + return p.blockProcessor +} + +// BlackListHandler will return the black list handler +func (p *processComponentsHolder) BlackListHandler() process.TimeCacher { + return p.blackListHandler +} + +// BootStorer will return the boot storer +func (p *processComponentsHolder) BootStorer() process.BootStorer { + return p.bootStorer +} + +// HeaderSigVerifier will return the header sign verifier +func (p *processComponentsHolder) HeaderSigVerifier() process.InterceptedHeaderSigVerifier { + return p.headerSigVerifier +} + +// HeaderIntegrityVerifier will return the header integrity verifier +func (p *processComponentsHolder) HeaderIntegrityVerifier() process.HeaderIntegrityVerifier { + return p.headerIntegrityVerifier +} + +// ValidatorsStatistics will return the validators statistics +func (p *processComponentsHolder) ValidatorsStatistics() process.ValidatorStatisticsProcessor { + return p.validatorsStatistics +} + +// ValidatorsProvider will return the validators provider +func (p *processComponentsHolder) ValidatorsProvider() process.ValidatorsProvider { + return p.validatorsProvider +} + +// BlockTracker will return the block tracker +func (p *processComponentsHolder) BlockTracker() process.BlockTracker { + return p.blockTracker +} + +// PendingMiniBlocksHandler will return the pending miniblocks handler +func (p *processComponentsHolder) PendingMiniBlocksHandler() process.PendingMiniBlocksHandler { + return p.pendingMiniBlocksHandler +} + +// RequestHandler will return the request handler +func (p *processComponentsHolder) RequestHandler() process.RequestHandler { + return p.requestHandler +} + +// TxLogsProcessor will return the transaction log processor +func (p *processComponentsHolder) TxLogsProcessor() process.TransactionLogProcessorDatabase { + return p.txLogsProcessor +} + +// HeaderConstructionValidator will return the header construction validator +func (p *processComponentsHolder) HeaderConstructionValidator() process.HeaderConstructionValidator { + return p.headerConstructionValidator +} + +// PeerShardMapper will return the peer shard mapper +func (p *processComponentsHolder) PeerShardMapper() process.NetworkShardingCollector { + return p.peerShardMapper +} + +// FullArchivePeerShardMapper will return the full archive peer shard mapper +func (p *processComponentsHolder) FullArchivePeerShardMapper() process.NetworkShardingCollector { + return p.fullArchivePeerShardMapper +} + +// FallbackHeaderValidator will return the fallback header validator +func (p *processComponentsHolder) FallbackHeaderValidator() process.FallbackHeaderValidator { + return p.fallbackHeaderValidator +} + +// APITransactionEvaluator will return the api transaction evaluator +func (p *processComponentsHolder) 
APITransactionEvaluator() factory.TransactionEvaluator { + return p.apiTransactionEvaluator +} + +// WhiteListHandler will return the white list handler +func (p *processComponentsHolder) WhiteListHandler() process.WhiteListHandler { + return p.whiteListHandler +} + +// WhiteListerVerifiedTxs will return the white lister verifier +func (p *processComponentsHolder) WhiteListerVerifiedTxs() process.WhiteListHandler { + return p.whiteListerVerifiedTxs +} + +// HistoryRepository will return the history repository +func (p *processComponentsHolder) HistoryRepository() dblookupext.HistoryRepository { + return p.historyRepository +} + +// ImportStartHandler will return the import start handler +func (p *processComponentsHolder) ImportStartHandler() update.ImportStartHandler { + return p.importStartHandler +} + +// RequestedItemsHandler will return the requested item handler +func (p *processComponentsHolder) RequestedItemsHandler() dataRetriever.RequestedItemsHandler { + return p.requestedItemsHandler +} + +// NodeRedundancyHandler will return the node redundancy handler +func (p *processComponentsHolder) NodeRedundancyHandler() consensus.NodeRedundancyHandler { + return p.nodeRedundancyHandler +} + +// CurrentEpochProvider will return the current epoch provider +func (p *processComponentsHolder) CurrentEpochProvider() process.CurrentNetworkEpochProviderHandler { + return p.currentEpochProvider +} + +// ScheduledTxsExecutionHandler will return the scheduled transactions execution handler +func (p *processComponentsHolder) ScheduledTxsExecutionHandler() process.ScheduledTxsExecutionHandler { + return p.scheduledTxsExecutionHandler +} + +// TxsSenderHandler will return the transactions sender handler +func (p *processComponentsHolder) TxsSenderHandler() process.TxsSenderHandler { + return p.txsSenderHandler +} + +// HardforkTrigger will return the hardfork trigger +func (p *processComponentsHolder) HardforkTrigger() factory.HardforkTrigger { + return p.hardforkTrigger +} + +// ProcessedMiniBlocksTracker will return the processed miniblocks tracker +func (p *processComponentsHolder) ProcessedMiniBlocksTracker() process.ProcessedMiniBlocksTracker { + return p.processedMiniBlocksTracker +} + +// ESDTDataStorageHandlerForAPI will return the esdt data storage handler for api +func (p *processComponentsHolder) ESDTDataStorageHandlerForAPI() vmcommon.ESDTNFTStorageHandler { + return p.esdtDataStorageHandlerForAPI +} + +// AccountsParser will return the accounts parser +func (p *processComponentsHolder) AccountsParser() genesis.AccountsParser { + return p.accountsParser +} + +// ReceiptsRepository returns the receipts repository +func (p *processComponentsHolder) ReceiptsRepository() factory.ReceiptsRepository { + return p.receiptsRepository +} + +// EpochSystemSCProcessor returns the epoch start system SC processor +func (p *processComponentsHolder) EpochSystemSCProcessor() process.EpochStartSystemSCProcessor { + return p.epochStartSystemSCProcessor +} + +// RelayedTxV3Processor returns the relayed tx v3 processor +func (p *processComponentsHolder) RelayedTxV3Processor() process.RelayedTxV3Processor { + return p.relayedTxV3Processor +} + +// Close will call the Close methods on all inner components +func (p *processComponentsHolder) Close() error { + return p.managedProcessComponentsCloser.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (p *processComponentsHolder) IsInterfaceNil() bool { + return p == nil +} + +// Create will do nothing +func (p 
*processComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (p *processComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (p *processComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/processComponents_test.go b/node/chainSimulator/components/processComponents_test.go new file mode 100644 index 00000000000..93b6a7689a3 --- /dev/null +++ b/node/chainSimulator/components/processComponents_test.go @@ -0,0 +1,416 @@ +package components + +import ( + "math/big" + "sync" + "testing" + + coreData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/hashing/blake2b" + "github.com/multiversx/mx-chain-core-go/hashing/keccak" + "github.com/multiversx/mx-chain-core-go/marshal" + commonFactory "github.com/multiversx/mx-chain-go/common/factory" + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" + "github.com/multiversx/mx-chain-go/config" + retriever "github.com/multiversx/mx-chain-go/dataRetriever" + mockFactory "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/sharding" + chainStorage "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" + "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" + "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" + "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storage" + updateMocks "github.com/multiversx/mx-chain-go/update/mock" + "github.com/stretchr/testify/require" +) + +const testingProtocolSustainabilityAddress = "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" + +var ( + addrPubKeyConv, _ = commonFactory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 32, + Type: "bech32", + SignatureLength: 0, + Hrp: "erd", + }) + valPubKeyConv, _ = commonFactory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 96, + 
Type: "hex", + SignatureLength: 48, + }) +) + +func createArgsProcessComponentsHolder() ArgsProcessComponentsHolder { + nodesSetup, _ := sharding.NewNodesSetup("../../../integrationTests/factory/testdata/nodesSetup.json", addrPubKeyConv, valPubKeyConv, 3) + + args := ArgsProcessComponentsHolder{ + Config: testscommon.GetGeneralConfig(), + EpochConfig: config.EpochConfig{ + GasSchedule: config.GasScheduleConfig{ + GasScheduleByEpochs: []config.GasScheduleByEpochs{ + { + StartEpoch: 0, + FileName: "../../../cmd/node/config/gasSchedules/gasScheduleV8.toml", + }, + }, + }, + }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + PrefsConfig: config.Preferences{}, + ImportDBConfig: config.ImportDbConfig{}, + FlagsConfig: config.ContextFlagsConfig{ + Version: "v1.0.0", + }, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + SystemSCConfig: config.SystemSmartContractsConfig{ + ESDTSystemSCConfig: config.ESDTSystemSCConfig{ + BaseIssuingCost: "1000", + OwnerAddress: "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", + }, + GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + ProposalCost: "500", + NumNodes: 100, + MinQuorum: 50, + MinPassThreshold: 50, + MinVetoThreshold: 50, + }, + Active: config.GovernanceSystemSCConfigActive{ + ProposalCost: "500", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, + }, + OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + StakingSystemSCConfig: config.StakingSystemSCConfig{ + GenesisNodePrice: "2500000000000000000000", + MinStakeValue: "1", + UnJailValue: "1", + MinStepValue: "1", + UnBondPeriod: 0, + NumRoundsWithoutBleed: 0, + MaximumPercentageToBleed: 0, + BleedPercentagePerRound: 0, + MaxNumberOfNodesForStake: 10, + ActivateBLSPubKeyMessageVerification: false, + MinUnstakeTokensValue: "1", + NodeLimitPercentage: 0.1, + StakeLimitPercentage: 1, + UnBondPeriodInEpochs: 10, + }, + DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ + MinCreationDeposit: "100", + MinStakeAmount: "100", + ConfigChangeAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + DelegationSystemSCConfig: config.DelegationSystemSCConfig{ + MinServiceFee: 0, + MaxServiceFee: 100, + }, + }, + DataComponents: &mock.DataComponentsStub{ + DataPool: dataRetriever.NewPoolsHolderMock(), + BlockChain: &testscommon.ChainHandlerStub{ + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis hash") + }, + GetGenesisHeaderCalled: func() coreData.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, + MbProvider: &mock.MiniBlocksProviderStub{}, + Store: genericMocks.NewChainStorerMock(0), + }, + CoreComponents: &mockFactory.CoreComponentsMock{ + IntMarsh: &marshal.GogoProtoMarshalizer{}, + TxMarsh: &marshal.JsonMarshalizer{}, + UInt64ByteSliceConv: &mock.Uint64ByteSliceConverterMock{}, + AddrPubKeyConv: addrPubKeyConv, + ValPubKeyConv: valPubKeyConv, + NodesConfig: nodesSetup, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{ + ProtocolSustainabilityAddressCalled: func() string { + return testingProtocolSustainabilityAddress + }, + GenesisTotalSupplyCalled: func() *big.Int { + return big.NewInt(0).Mul(big.NewInt(1000000000000000000), big.NewInt(20000000)) + }, + }, + Hash: blake2b.NewBlake2b(), + TxVersionCheckHandler: &testscommon.TxVersionCheckerStub{}, + RatingHandler: &testscommon.RaterMock{}, + EnableEpochsHandlerField: 
&enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, + EpochNotifierWithConfirm: &updateMocks.EpochStartNotifierStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + RoundChangeNotifier: &epochNotifier.RoundNotifierStub{}, + ChanStopProcess: make(chan endProcess.ArgEndProcess, 1), + TxSignHasherField: keccak.NewKeccak(), + HardforkTriggerPubKeyField: []byte("hardfork pub key"), + WasmVMChangeLockerInternal: &sync.RWMutex{}, + NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, + PathHdl: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, + }, + CryptoComponents: &mock.CryptoComponentsStub{ + BlKeyGen: &cryptoMocks.KeyGenStub{}, + BlockSig: &cryptoMocks.SingleSignerStub{}, + MultiSigContainer: &cryptoMocks.MultiSignerContainerMock{ + MultiSigner: &cryptoMocks.MultisignerMock{}, + }, + PrivKey: &cryptoMocks.PrivateKeyStub{}, + PubKey: &cryptoMocks.PublicKeyStub{}, + PubKeyString: "pub key string", + PubKeyBytes: []byte("pub key bytes"), + TxKeyGen: &cryptoMocks.KeyGenStub{}, + TxSig: &cryptoMocks.SingleSignerStub{}, + PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, + MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, + ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + KeysHandlerField: &testscommon.KeysHandlerStub{}, + }, + NetworkComponents: &mock.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + InputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + }, + BootstrapComponents: &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: mock.NewMultiShardsCoordinatorMock(2), + BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, + HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + GuardedAccountHandlerField: &guardianMocks.GuardedAccountHandlerStub{}, + VersionedHdrFactory: &testscommon.VersionedHeaderFactoryStub{}, + }, + StatusComponents: &mock.StatusComponentsStub{ + Outport: &outport.OutportStub{}, + }, + StatusCoreComponents: &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: disabledStatistics.NewStateStatistics(), + }, + EconomicsConfig: config.EconomicsConfig{ + GlobalSettings: config.GlobalSettings{ + GenesisTotalSupply: "20000000000000000000000000", + MinimumInflation: 0, + GenesisMintingSenderAddress: "erd17rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rcqqkhty3", + YearSettings: []*config.YearSetting{ + { + Year: 0, + MaximumInflation: 0.01, + }, + }, + }, + }, + ConfigurationPathsHolder: config.ConfigurationPathsHolder{ + Genesis: "../../../integrationTests/factory/testdata/genesis.json", + SmartContracts: "../../../integrationTests/factory/testdata/genesisSmartContracts.json", + Nodes: "../../../integrationTests/factory/testdata/genesis.json", + }, + } + + args.StateComponents = components.GetStateComponents(args.CoreComponents, args.StatusCoreComponents) + return args +} + +func TestCreateProcessComponents(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("should work", func(t *testing.T) { + comp, err := 
CreateProcessComponents(createArgsProcessComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewImportStartHandler failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.FlagsConfig.Version = "" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("total supply conversion failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.EconomicsConfig.GlobalSettings.GenesisTotalSupply = "invalid number" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewAccountsParser failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.ConfigurationPathsHolder.Genesis = "" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewSmartContractsParser failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.ConfigurationPathsHolder.SmartContracts = "" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewHistoryRepositoryFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + dataMock, ok := args.DataComponents.(*mock.DataComponentsStub) + require.True(t, ok) + dataMock.Store = nil + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("historyRepositoryFactory.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.Config.DbLookupExtensions.Enabled = true + dataMock, ok := args.DataComponents.(*mock.DataComponentsStub) + require.True(t, ok) + dataMock.Store = &storage.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (chainStorage.Storer, error) { + if unitType == retriever.ESDTSuppliesUnit { + return nil, expectedErr + } + return &storage.StorerStub{}, nil + }, + } + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewGasScheduleNotifier failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.EpochConfig.GasSchedule = config.GasScheduleConfig{} + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewProcessComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + dataMock, ok := args.DataComponents.(*mock.DataComponentsStub) + require.True(t, ok) + dataMock.BlockChain = nil + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedProcessComponents.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.NodesCoordinator = nil + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestProcessComponentsHolder_IsInterfaceNil(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + var comp *processComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateProcessComponents(createArgsProcessComponentsHolder()) + require.False(t, 
comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestProcessComponentsHolder_Getters(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + comp, err := CreateProcessComponents(createArgsProcessComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.SentSignaturesTracker()) + require.NotNil(t, comp.NodesCoordinator()) + require.NotNil(t, comp.ShardCoordinator()) + require.NotNil(t, comp.InterceptorsContainer()) + require.NotNil(t, comp.FullArchiveInterceptorsContainer()) + require.NotNil(t, comp.ResolversContainer()) + require.NotNil(t, comp.RequestersFinder()) + require.NotNil(t, comp.RoundHandler()) + require.NotNil(t, comp.EpochStartTrigger()) + require.NotNil(t, comp.EpochStartNotifier()) + require.NotNil(t, comp.ForkDetector()) + require.NotNil(t, comp.BlockProcessor()) + require.NotNil(t, comp.BlackListHandler()) + require.NotNil(t, comp.BootStorer()) + require.NotNil(t, comp.HeaderSigVerifier()) + require.NotNil(t, comp.HeaderIntegrityVerifier()) + require.NotNil(t, comp.ValidatorsStatistics()) + require.NotNil(t, comp.ValidatorsProvider()) + require.NotNil(t, comp.BlockTracker()) + require.NotNil(t, comp.PendingMiniBlocksHandler()) + require.NotNil(t, comp.RequestHandler()) + require.NotNil(t, comp.TxLogsProcessor()) + require.NotNil(t, comp.HeaderConstructionValidator()) + require.NotNil(t, comp.PeerShardMapper()) + require.NotNil(t, comp.FullArchivePeerShardMapper()) + require.NotNil(t, comp.FallbackHeaderValidator()) + require.NotNil(t, comp.APITransactionEvaluator()) + require.NotNil(t, comp.WhiteListHandler()) + require.NotNil(t, comp.WhiteListerVerifiedTxs()) + require.NotNil(t, comp.HistoryRepository()) + require.NotNil(t, comp.ImportStartHandler()) + require.NotNil(t, comp.RequestedItemsHandler()) + require.NotNil(t, comp.NodeRedundancyHandler()) + require.NotNil(t, comp.CurrentEpochProvider()) + require.NotNil(t, comp.ScheduledTxsExecutionHandler()) + require.NotNil(t, comp.TxsSenderHandler()) + require.NotNil(t, comp.HardforkTrigger()) + require.NotNil(t, comp.ProcessedMiniBlocksTracker()) + require.NotNil(t, comp.ESDTDataStorageHandlerForAPI()) + require.NotNil(t, comp.AccountsParser()) + require.NotNil(t, comp.ReceiptsRepository()) + require.NotNil(t, comp.EpochSystemSCProcessor()) + require.NotNil(t, comp.RelayedTxV3Processor()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/stateComponents.go b/node/chainSimulator/components/stateComponents.go new file mode 100644 index 00000000000..b3fddf55f40 --- /dev/null +++ b/node/chainSimulator/components/stateComponents.go @@ -0,0 +1,135 @@ +package components + +import ( + "io" + + chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/factory" + factoryState "github.com/multiversx/mx-chain-go/factory/state" + "github.com/multiversx/mx-chain-go/state" +) + +// ArgsStateComponents will hold the components needed for state components +type ArgsStateComponents struct { + Config config.Config + CoreComponents factory.CoreComponentsHolder + StatusCore factory.StatusCoreComponentsHolder + StoreService dataRetriever.StorageService + ChainHandler chainData.ChainHandler +} 
+ +type stateComponentsHolder struct { + peerAccount state.AccountsAdapter + accountsAdapter state.AccountsAdapter + accountsAdapterAPI state.AccountsAdapter + accountsRepository state.AccountsRepository + triesContainer common.TriesHolder + triesStorageManager map[string]common.StorageManager + missingTrieNodesNotifier common.MissingTrieNodesNotifier + stateComponentsCloser io.Closer +} + +// CreateStateComponents will create the state components holder +func CreateStateComponents(args ArgsStateComponents) (*stateComponentsHolder, error) { + stateComponentsFactory, err := factoryState.NewStateComponentsFactory(factoryState.StateComponentsFactoryArgs{ + Config: args.Config, + Core: args.CoreComponents, + StatusCore: args.StatusCore, + StorageService: args.StoreService, + ProcessingMode: common.Normal, + ShouldSerializeSnapshots: false, + ChainHandler: args.ChainHandler, + }) + if err != nil { + return nil, err + } + + stateComp, err := factoryState.NewManagedStateComponents(stateComponentsFactory) + if err != nil { + return nil, err + } + + err = stateComp.Create() + if err != nil { + return nil, err + } + + err = stateComp.CheckSubcomponents() + if err != nil { + return nil, err + } + + return &stateComponentsHolder{ + peerAccount: stateComp.PeerAccounts(), + accountsAdapter: stateComp.AccountsAdapter(), + accountsAdapterAPI: stateComp.AccountsAdapterAPI(), + accountsRepository: stateComp.AccountsRepository(), + triesContainer: stateComp.TriesContainer(), + triesStorageManager: stateComp.TrieStorageManagers(), + missingTrieNodesNotifier: stateComp.MissingTrieNodesNotifier(), + stateComponentsCloser: stateComp, + }, nil +} + +// PeerAccounts will return peer accounts +func (s *stateComponentsHolder) PeerAccounts() state.AccountsAdapter { + return s.peerAccount +} + +// AccountsAdapter will return accounts adapter +func (s *stateComponentsHolder) AccountsAdapter() state.AccountsAdapter { + return s.accountsAdapter +} + +// AccountsAdapterAPI will return accounts adapter api +func (s *stateComponentsHolder) AccountsAdapterAPI() state.AccountsAdapter { + return s.accountsAdapterAPI +} + +// AccountsRepository will return accounts repository +func (s *stateComponentsHolder) AccountsRepository() state.AccountsRepository { + return s.accountsRepository +} + +// TriesContainer will return tries container +func (s *stateComponentsHolder) TriesContainer() common.TriesHolder { + return s.triesContainer +} + +// TrieStorageManagers will return trie storage managers +func (s *stateComponentsHolder) TrieStorageManagers() map[string]common.StorageManager { + return s.triesStorageManager +} + +// MissingTrieNodesNotifier will return missing trie nodes notifier +func (s *stateComponentsHolder) MissingTrieNodesNotifier() common.MissingTrieNodesNotifier { + return s.missingTrieNodesNotifier +} + +// Close will close the state components +func (s *stateComponentsHolder) Close() error { + return s.stateComponentsCloser.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *stateComponentsHolder) IsInterfaceNil() bool { + return s == nil +} + +// Create will do nothing +func (s *stateComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (s *stateComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (s *stateComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/stateComponents_test.go b/node/chainSimulator/components/stateComponents_test.go new 
file mode 100644 index 00000000000..5422d2ea352 --- /dev/null +++ b/node/chainSimulator/components/stateComponents_test.go @@ -0,0 +1,99 @@ +package components + +import ( + "testing" + + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" + mockFactory "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func createArgsStateComponents() ArgsStateComponents { + return ArgsStateComponents{ + Config: testscommon.GetGeneralConfig(), + CoreComponents: &mockFactory.CoreComponentsMock{ + IntMarsh: &testscommon.MarshallerStub{}, + Hash: &testscommon.HasherStub{}, + PathHdl: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + }, + StatusCore: &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: disabledStatistics.NewStateStatistics(), + }, + StoreService: genericMocks.NewChainStorerMock(0), + ChainHandler: &testscommon.ChainHandlerStub{}, + } +} + +func TestCreateStateComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStateComponents(createArgsStateComponents()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewStateComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsStateComponents() + args.CoreComponents = nil + comp, err := CreateStateComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("stateComp.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsStateComponents() + coreMock, ok := args.CoreComponents.(*mockFactory.CoreComponentsMock) + require.True(t, ok) + coreMock.EnableEpochsHandlerField = nil + comp, err := CreateStateComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestStateComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *stateComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateStateComponents(createArgsStateComponents()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestStateComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateStateComponents(createArgsStateComponents()) + require.NoError(t, err) + + require.NotNil(t, comp.PeerAccounts()) + require.NotNil(t, comp.AccountsAdapter()) + require.NotNil(t, comp.AccountsAdapterAPI()) + require.NotNil(t, comp.AccountsRepository()) + require.NotNil(t, comp.TriesContainer()) + require.NotNil(t, comp.TrieStorageManagers()) + require.NotNil(t, comp.MissingTrieNodesNotifier()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/statusComponents.go 
b/node/chainSimulator/components/statusComponents.go new file mode 100644 index 00000000000..be094472fc1 --- /dev/null +++ b/node/chainSimulator/components/statusComponents.go @@ -0,0 +1,211 @@ +package components + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/appStatusPolling" + "github.com/multiversx/mx-chain-core-go/core/check" + factoryMarshalizer "github.com/multiversx/mx-chain-core-go/marshal/factory" + indexerFactory "github.com/multiversx/mx-chain-es-indexer-go/process/factory" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/outport/factory" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/testscommon" +) + +type statusComponentsHolder struct { + closeHandler *closeHandler + outportHandler outport.OutportHandler + softwareVersionChecker statistics.SoftwareVersionChecker + managedPeerMonitor common.ManagedPeersMonitor + appStatusHandler core.AppStatusHandler + forkDetector process.ForkDetector + statusPollingIntervalSec int + cancelFunc func() + mutex sync.RWMutex +} + +// CreateStatusComponents will create a new instance of status components holder +func CreateStatusComponents(shardID uint32, appStatusHandler core.AppStatusHandler, statusPollingIntervalSec int, external config.ExternalConfig, coreComponents process.CoreComponentsHolder) (*statusComponentsHolder, error) { + if check.IfNil(appStatusHandler) { + return nil, core.ErrNilAppStatusHandler + } + + var err error + instance := &statusComponentsHolder{ + closeHandler: NewCloseHandler(), + appStatusHandler: appStatusHandler, + statusPollingIntervalSec: statusPollingIntervalSec, + } + + hostDriverArgs, err := makeHostDriversArgs(external) + if err != nil { + return nil, err + } + instance.outportHandler, err = factory.CreateOutport(&factory.OutportFactoryArgs{ + IsImportDB: false, + ShardID: shardID, + RetrialInterval: time.Second, + HostDriversArgs: hostDriverArgs, + EventNotifierFactoryArgs: &factory.EventNotifierFactoryArgs{}, + ElasticIndexerFactoryArgs: makeElasticIndexerArgs(external, coreComponents), + }) + if err != nil { + return nil, err + } + instance.softwareVersionChecker = &mock.SoftwareVersionCheckerMock{} + instance.managedPeerMonitor = &testscommon.ManagedPeersMonitorStub{} + + instance.collectClosableComponents() + + return instance, nil +} + +func makeHostDriversArgs(external config.ExternalConfig) ([]factory.ArgsHostDriverFactory, error) { + argsHostDriverFactorySlice := make([]factory.ArgsHostDriverFactory, 0, len(external.HostDriversConfig)) + for idx := 0; idx < len(external.HostDriversConfig); idx++ { + hostConfig := external.HostDriversConfig[idx] + if !hostConfig.Enabled { + continue + } + + marshaller, err := factoryMarshalizer.NewMarshalizer(hostConfig.MarshallerType) + if err != nil { + return argsHostDriverFactorySlice, err + } + + argsHostDriverFactorySlice = append(argsHostDriverFactorySlice, factory.ArgsHostDriverFactory{ + Marshaller: marshaller, + 
HostConfig: hostConfig, + }) + } + + return argsHostDriverFactorySlice, nil +} + +func makeElasticIndexerArgs(external config.ExternalConfig, coreComponents process.CoreComponentsHolder) indexerFactory.ArgsIndexerFactory { + elasticSearchConfig := external.ElasticSearchConnector + return indexerFactory.ArgsIndexerFactory{ + Enabled: elasticSearchConfig.Enabled, + BulkRequestMaxSize: elasticSearchConfig.BulkRequestMaxSizeInBytes, + Url: elasticSearchConfig.URL, + UserName: elasticSearchConfig.Username, + Password: elasticSearchConfig.Password, + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + AddressPubkeyConverter: coreComponents.AddressPubKeyConverter(), + ValidatorPubkeyConverter: coreComponents.ValidatorPubKeyConverter(), + EnabledIndexes: elasticSearchConfig.EnabledIndexes, + Denomination: 18, + UseKibana: elasticSearchConfig.UseKibana, + ImportDB: false, + HeaderMarshaller: coreComponents.InternalMarshalizer(), + } +} + +// OutportHandler will return the outport handler +func (s *statusComponentsHolder) OutportHandler() outport.OutportHandler { + return s.outportHandler +} + +// SoftwareVersionChecker will return the software version checker +func (s *statusComponentsHolder) SoftwareVersionChecker() statistics.SoftwareVersionChecker { + return s.softwareVersionChecker +} + +// ManagedPeersMonitor will return the managed peers monitor +func (s *statusComponentsHolder) ManagedPeersMonitor() common.ManagedPeersMonitor { + return s.managedPeerMonitor +} + +func (s *statusComponentsHolder) collectClosableComponents() { + s.closeHandler.AddComponent(s.outportHandler) + s.closeHandler.AddComponent(s.softwareVersionChecker) +} + +// Close will call the Close methods on all inner components +func (s *statusComponentsHolder) Close() error { + if s.cancelFunc != nil { + s.cancelFunc() + } + + return s.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *statusComponentsHolder) IsInterfaceNil() bool { + return s == nil +} + +// Create will do nothing +func (s *statusComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (s *statusComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (s *statusComponentsHolder) String() string { + return "" +} + +// SetForkDetector will set the fork detector +func (s *statusComponentsHolder) SetForkDetector(forkDetector process.ForkDetector) error { + if check.IfNil(forkDetector) { + return process.ErrNilForkDetector + } + + s.mutex.Lock() + s.forkDetector = forkDetector + s.mutex.Unlock() + + return nil +} + +// StartPolling starts polling for the updated status +func (s *statusComponentsHolder) StartPolling() error { + if check.IfNil(s.forkDetector) { + return process.ErrNilForkDetector + } + + var ctx context.Context + ctx, s.cancelFunc = context.WithCancel(context.Background()) + + appStatusPollingHandler, err := appStatusPolling.NewAppStatusPolling( + s.appStatusHandler, + time.Duration(s.statusPollingIntervalSec)*time.Second, + log, + ) + if err != nil { + return errors.ErrStatusPollingInit + } + + err = appStatusPollingHandler.RegisterPollingFunc(s.probableHighestNonceHandler) + if err != nil { + return fmt.Errorf("%w, cannot register handler func for forkdetector's probable higher nonce", err) + } + + appStatusPollingHandler.Poll(ctx) + + return nil +} + +func (s *statusComponentsHolder) probableHighestNonceHandler(appStatusHandler core.AppStatusHandler) { + s.mutex.RLock() + 
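+ // the fork detector reference is read under the mutex because SetForkDetector may replace it while polling is active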
probableHigherNonce := s.forkDetector.ProbableHighestNonce() + s.mutex.RUnlock() + + appStatusHandler.SetUInt64Value(common.MetricProbableHighestNonce, probableHigherNonce) +} diff --git a/node/chainSimulator/components/statusComponents_test.go b/node/chainSimulator/components/statusComponents_test.go new file mode 100644 index 00000000000..24f3b4595c1 --- /dev/null +++ b/node/chainSimulator/components/statusComponents_test.go @@ -0,0 +1,136 @@ +package components + +import ( + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + mxErrors "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func TestCreateStatusComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5, config.ExternalConfig{}, &mock.CoreComponentsStub{}) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, nil, 5, config.ExternalConfig{}, &mock.CoreComponentsStub{}) + require.Equal(t, core.ErrNilAppStatusHandler, err) + require.Nil(t, comp) + }) +} + +func TestStatusComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *statusComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5, config.ExternalConfig{}, &mock.CoreComponentsStub{}) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestStatusComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5, config.ExternalConfig{}, &mock.CoreComponentsStub{}) + require.NoError(t, err) + + require.NotNil(t, comp.OutportHandler()) + require.NotNil(t, comp.SoftwareVersionChecker()) + require.NotNil(t, comp.ManagedPeersMonitor()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + + require.Nil(t, comp.Close()) +} +func TestStatusComponentsHolder_SetForkDetector(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5, config.ExternalConfig{}, &mock.CoreComponentsStub{}) + require.NoError(t, err) + + err = comp.SetForkDetector(nil) + require.Equal(t, process.ErrNilForkDetector, err) + + err = comp.SetForkDetector(&mock.ForkDetectorStub{}) + require.NoError(t, err) + + require.Nil(t, comp.Close()) +} + +func TestStatusComponentsHolder_StartPolling(t *testing.T) { + t.Parallel() + + t.Run("nil fork detector should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5, config.ExternalConfig{}, &mock.CoreComponentsStub{}) + require.NoError(t, err) + + err = comp.StartPolling() + require.Equal(t, process.ErrNilForkDetector, err) + }) + t.Run("NewAppStatusPolling failure should error", 
func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 0, config.ExternalConfig{}, &mock.CoreComponentsStub{}) + require.NoError(t, err) + + err = comp.SetForkDetector(&mock.ForkDetectorStub{}) + require.NoError(t, err) + + err = comp.StartPolling() + require.Equal(t, mxErrors.ErrStatusPollingInit, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedHighestNonce := uint64(123) + providedStatusPollingIntervalSec := 1 + wasSetUInt64ValueCalled := atomic.Flag{} + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + require.Equal(t, common.MetricProbableHighestNonce, key) + require.Equal(t, providedHighestNonce, value) + wasSetUInt64ValueCalled.SetValue(true) + }, + } + comp, err := CreateStatusComponents(0, appStatusHandler, providedStatusPollingIntervalSec, config.ExternalConfig{}, &mock.CoreComponentsStub{}) + require.NoError(t, err) + + forkDetector := &mock.ForkDetectorStub{ + ProbableHighestNonceCalled: func() uint64 { + return providedHighestNonce + }, + } + err = comp.SetForkDetector(forkDetector) + require.NoError(t, err) + + err = comp.StartPolling() + require.NoError(t, err) + + time.Sleep(time.Duration(providedStatusPollingIntervalSec+1) * time.Second) + require.True(t, wasSetUInt64ValueCalled.IsSet()) + + require.Nil(t, comp.Close()) + }) +} diff --git a/node/chainSimulator/components/statusCoreComponents.go b/node/chainSimulator/components/statusCoreComponents.go new file mode 100644 index 00000000000..7ac3b9045fa --- /dev/null +++ b/node/chainSimulator/components/statusCoreComponents.go @@ -0,0 +1,126 @@ +package components + +import ( + "io" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/factory/statusCore" + "github.com/multiversx/mx-chain-go/node/external" +) + +type statusCoreComponentsHolder struct { + resourceMonitor factory.ResourceMonitor + networkStatisticsProvider factory.NetworkStatisticsProvider + trieSyncStatisticsProvider factory.TrieSyncStatisticsProvider + statusHandler core.AppStatusHandler + statusMetrics external.StatusMetricsHandler + persistentStatusHandler factory.PersistentStatusHandler + stateStatisticsHandler common.StateStatisticsHandler + managedStatusCoreComponentsCloser io.Closer +} + +// CreateStatusCoreComponents will create a new instance of factory.StatusCoreComponentsHandler +func CreateStatusCoreComponents(configs config.Configs, coreComponents factory.CoreComponentsHolder) (*statusCoreComponentsHolder, error) { + var err error + + statusCoreComponentsFactory, err := statusCore.NewStatusCoreComponentsFactory(statusCore.StatusCoreComponentsFactoryArgs{ + Config: *configs.GeneralConfig, + EpochConfig: *configs.EpochConfig, + RoundConfig: *configs.RoundConfig, + RatingsConfig: *configs.RatingsConfig, + EconomicsConfig: *configs.EconomicsConfig, + CoreComp: coreComponents, + }) + if err != nil { + return nil, err + } + + managedStatusCoreComponents, err := statusCore.NewManagedStatusCoreComponents(statusCoreComponentsFactory) + if err != nil { + return nil, err + } + + err = managedStatusCoreComponents.Create() + if err != nil { + return nil, err + } + + // stop resource monitor + _ = 
managedStatusCoreComponents.ResourceMonitor().Close() + + instance := &statusCoreComponentsHolder{ + resourceMonitor: managedStatusCoreComponents.ResourceMonitor(), + networkStatisticsProvider: managedStatusCoreComponents.NetworkStatistics(), + trieSyncStatisticsProvider: managedStatusCoreComponents.TrieSyncStatistics(), + statusHandler: managedStatusCoreComponents.AppStatusHandler(), + statusMetrics: managedStatusCoreComponents.StatusMetrics(), + persistentStatusHandler: managedStatusCoreComponents.PersistentStatusHandler(), + stateStatisticsHandler: managedStatusCoreComponents.StateStatsHandler(), + managedStatusCoreComponentsCloser: managedStatusCoreComponents, + } + + return instance, nil +} + +// StateStatsHandler will return the state statistics handler +func (s *statusCoreComponentsHolder) StateStatsHandler() common.StateStatisticsHandler { + return s.stateStatisticsHandler +} + +// ResourceMonitor will return the resource monitor +func (s *statusCoreComponentsHolder) ResourceMonitor() factory.ResourceMonitor { + return s.resourceMonitor +} + +// NetworkStatistics will return the network statistics provider +func (s *statusCoreComponentsHolder) NetworkStatistics() factory.NetworkStatisticsProvider { + return s.networkStatisticsProvider +} + +// TrieSyncStatistics will return trie sync statistics provider +func (s *statusCoreComponentsHolder) TrieSyncStatistics() factory.TrieSyncStatisticsProvider { + return s.trieSyncStatisticsProvider +} + +// AppStatusHandler will return the status handler +func (s *statusCoreComponentsHolder) AppStatusHandler() core.AppStatusHandler { + return s.statusHandler +} + +// StatusMetrics will return the status metrics handler +func (s *statusCoreComponentsHolder) StatusMetrics() external.StatusMetricsHandler { + return s.statusMetrics +} + +// PersistentStatusHandler will return the persistent status handler +func (s *statusCoreComponentsHolder) PersistentStatusHandler() factory.PersistentStatusHandler { + return s.persistentStatusHandler +} + +// Close will call the Close methods on all inner components +func (s *statusCoreComponentsHolder) Close() error { + return s.managedStatusCoreComponentsCloser.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *statusCoreComponentsHolder) IsInterfaceNil() bool { + return s == nil +} + +// Create will do nothing +func (s *statusCoreComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (s *statusCoreComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (s *statusCoreComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/statusCoreComponents_test.go b/node/chainSimulator/components/statusCoreComponents_test.go new file mode 100644 index 00000000000..a616890644f --- /dev/null +++ b/node/chainSimulator/components/statusCoreComponents_test.go @@ -0,0 +1,113 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/factory/mock" + mockTests "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + 
"github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/stretchr/testify/require" +) + +func createArgs() (config.Configs, factory.CoreComponentsHolder) { + generalCfg := testscommon.GetGeneralConfig() + ratingsCfg := components.CreateDummyRatingsConfig() + economicsCfg := components.CreateDummyEconomicsConfig() + cfg := config.Configs{ + GeneralConfig: &generalCfg, + EpochConfig: &config.EpochConfig{ + GasSchedule: config.GasScheduleConfig{ + GasScheduleByEpochs: []config.GasScheduleByEpochs{ + { + StartEpoch: 0, + FileName: "gasScheduleV1.toml", + }, + }, + }, + }, + RoundConfig: &config.RoundConfig{ + RoundActivations: map[string]config.ActivationRoundByName{ + "Example": { + Round: "18446744073709551615", + }, + }, + }, + RatingsConfig: &ratingsCfg, + EconomicsConfig: &economicsCfg, + } + + return cfg, &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + IntMarsh: &testscommon.MarshallerStub{}, + UInt64ByteSliceConv: &mockTests.Uint64ByteSliceConverterMock{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, + } +} + +func TestCreateStatusCoreComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg, coreComp := createArgs() + comp, err := CreateStatusCoreComponents(cfg, coreComp) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewStatusCoreComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + cfg, _ := createArgs() + comp, err := CreateStatusCoreComponents(cfg, nil) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedStatusCoreComponents.Create failure should error", func(t *testing.T) { + t.Parallel() + + cfg, coreComp := createArgs() + cfg.GeneralConfig.ResourceStats.RefreshIntervalInSec = 0 + comp, err := CreateStatusCoreComponents(cfg, coreComp) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestStatusCoreComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *statusCoreComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + cfg, coreComp := createArgs() + comp, _ = CreateStatusCoreComponents(cfg, coreComp) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestStatusCoreComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + cfg, coreComp := createArgs() + comp, err := CreateStatusCoreComponents(cfg, coreComp) + require.NoError(t, err) + + require.NotNil(t, comp.ResourceMonitor()) + require.NotNil(t, comp.NetworkStatistics()) + require.NotNil(t, comp.TrieSyncStatistics()) + require.NotNil(t, comp.AppStatusHandler()) + require.NotNil(t, comp.StatusMetrics()) + require.NotNil(t, comp.PersistentStatusHandler()) + require.NotNil(t, comp.StateStatsHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) +} diff --git a/node/chainSimulator/components/storageService.go b/node/chainSimulator/components/storageService.go new file mode 100644 index 00000000000..9a2a7c4860f --- /dev/null +++ b/node/chainSimulator/components/storageService.go @@ -0,0 +1,39 @@ +package components + +import ( + "github.com/multiversx/mx-chain-go/dataRetriever" +) + +// CreateStore creates a storage service for shard nodes +func CreateStore(numOfShards uint32) dataRetriever.StorageService { + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.TransactionUnit, CreateMemUnit()) + 
store.AddStorer(dataRetriever.MiniBlockUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MetaBlockUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.PeerChangesUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.UnsignedTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.RewardTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.BootstrapUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.StatusMetricsUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.ReceiptsUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.ScheduledSCRsUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.TxLogsUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.UserAccountsUnit, CreateMemUnitForTries()) + store.AddStorer(dataRetriever.PeerAccountsUnit, CreateMemUnitForTries()) + store.AddStorer(dataRetriever.ESDTSuppliesUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.RoundHdrHashDataUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MiniblocksMetadataUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MiniblockHashByTxHashUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.EpochByHashUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.ResultsHashesByTxHashUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.TrieEpochRootHashUnit, CreateMemUnit()) + + for i := uint32(0); i < numOfShards; i++ { + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + store.AddStorer(hdrNonceHashDataUnit, CreateMemUnit()) + } + + return store +} diff --git a/node/chainSimulator/components/storageService_test.go b/node/chainSimulator/components/storageService_test.go new file mode 100644 index 00000000000..3be398b53e6 --- /dev/null +++ b/node/chainSimulator/components/storageService_test.go @@ -0,0 +1,51 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/stretchr/testify/require" +) + +func TestCreateStore(t *testing.T) { + t.Parallel() + + store := CreateStore(2) + require.NotNil(t, store) + + expectedUnits := []dataRetriever.UnitType{ + dataRetriever.TransactionUnit, + dataRetriever.MiniBlockUnit, + dataRetriever.MetaBlockUnit, + dataRetriever.PeerChangesUnit, + dataRetriever.BlockHeaderUnit, + dataRetriever.UnsignedTransactionUnit, + dataRetriever.RewardTransactionUnit, + dataRetriever.MetaHdrNonceHashDataUnit, + dataRetriever.BootstrapUnit, + dataRetriever.StatusMetricsUnit, + dataRetriever.ReceiptsUnit, + dataRetriever.ScheduledSCRsUnit, + dataRetriever.TxLogsUnit, + dataRetriever.UserAccountsUnit, + dataRetriever.PeerAccountsUnit, + dataRetriever.ESDTSuppliesUnit, + dataRetriever.RoundHdrHashDataUnit, + dataRetriever.MiniblocksMetadataUnit, + dataRetriever.MiniblockHashByTxHashUnit, + dataRetriever.EpochByHashUnit, + dataRetriever.ResultsHashesByTxHashUnit, + dataRetriever.TrieEpochRootHashUnit, + dataRetriever.ShardHdrNonceHashDataUnit, + dataRetriever.UnitType(101), // shard 2 + } + + all := store.GetAllStorers() + require.Equal(t, len(expectedUnits), len(all)) + + for i := 0; i < len(expectedUnits); i++ { + unit, err := store.GetStorer(expectedUnits[i]) + require.NoError(t, err) + require.NotNil(t, unit) + } +} diff --git a/node/chainSimulator/components/syncedBroadcastNetwork.go b/node/chainSimulator/components/syncedBroadcastNetwork.go new file mode 100644 index 
00000000000..99e8168c45e --- /dev/null +++ b/node/chainSimulator/components/syncedBroadcastNetwork.go @@ -0,0 +1,135 @@ +package components + +import ( + "errors" + "fmt" + "sync" + + "github.com/multiversx/mx-chain-communication-go/p2p" + p2pMessage "github.com/multiversx/mx-chain-communication-go/p2p/message" + "github.com/multiversx/mx-chain-core-go/core" +) + +var ( + errNilHandler = errors.New("nil handler") + errHandlerAlreadyExists = errors.New("handler already exists") + errUnknownPeer = errors.New("unknown peer") +) + +type messageReceiver interface { + receive(fromConnectedPeer core.PeerID, message p2p.MessageP2P) + HasTopic(name string) bool +} + +type syncedBroadcastNetwork struct { + mutOperation sync.RWMutex + peers map[core.PeerID]messageReceiver +} + +// NewSyncedBroadcastNetwork creates a new synced broadcast network +func NewSyncedBroadcastNetwork() *syncedBroadcastNetwork { + return &syncedBroadcastNetwork{ + peers: make(map[core.PeerID]messageReceiver), + } +} + +// RegisterMessageReceiver registers the message receiver +func (network *syncedBroadcastNetwork) RegisterMessageReceiver(handler messageReceiver, pid core.PeerID) { + if handler == nil { + log.Error("programming error in syncedBroadcastNetwork.RegisterMessageReceiver: %w", errNilHandler) + return + } + + network.mutOperation.Lock() + defer network.mutOperation.Unlock() + + _, found := network.peers[pid] + if found { + log.Error("programming error in syncedBroadcastNetwork.RegisterMessageReceiver", "pid", pid.Pretty(), "error", errHandlerAlreadyExists) + return + } + + network.peers[pid] = handler +} + +// Broadcast will iterate through peers and send the message +func (network *syncedBroadcastNetwork) Broadcast(pid core.PeerID, topic string, buff []byte) { + _, handlers := network.getPeersAndHandlers() + + for _, handler := range handlers { + message := &p2pMessage.Message{ + FromField: pid.Bytes(), + DataField: buff, + TopicField: topic, + BroadcastMethodField: p2p.Broadcast, + PeerField: pid, + } + + handler.receive(pid, message) + } +} + +// SendDirectly will try to send directly to the provided peer +func (network *syncedBroadcastNetwork) SendDirectly(from core.PeerID, topic string, buff []byte, to core.PeerID) error { + network.mutOperation.RLock() + handler, found := network.peers[to] + if !found { + network.mutOperation.RUnlock() + + return fmt.Errorf("syncedBroadcastNetwork.SendDirectly: %w, pid %s", errUnknownPeer, to.Pretty()) + } + network.mutOperation.RUnlock() + + message := &p2pMessage.Message{ + FromField: from.Bytes(), + DataField: buff, + TopicField: topic, + BroadcastMethodField: p2p.Direct, + PeerField: from, + } + + handler.receive(from, message) + + return nil +} + +// GetConnectedPeers returns all connected peers +func (network *syncedBroadcastNetwork) GetConnectedPeers() []core.PeerID { + peers, _ := network.getPeersAndHandlers() + + return peers +} + +func (network *syncedBroadcastNetwork) getPeersAndHandlers() ([]core.PeerID, []messageReceiver) { + network.mutOperation.RLock() + defer network.mutOperation.RUnlock() + + peers := make([]core.PeerID, 0, len(network.peers)) + handlers := make([]messageReceiver, 0, len(network.peers)) + + for p, handler := range network.peers { + peers = append(peers, p) + handlers = append(handlers, handler) + } + + return peers, handlers +} + +// GetConnectedPeersOnTopic will find suitable peers connected on the provided topic +func (network *syncedBroadcastNetwork) GetConnectedPeersOnTopic(topic string) 
[]core.PeerID { + peers, handlers := network.getPeersAndHandlers() + + peersOnTopic := make([]core.PeerID, 0, len(peers)) + for idx, p := range peers { + if handlers[idx].HasTopic(topic) { + peersOnTopic = append(peersOnTopic, p) + } + } + + return peersOnTopic +} + +// IsInterfaceNil returns true if there is no value under the interface +func (network *syncedBroadcastNetwork) IsInterfaceNil() bool { + return network == nil +} diff --git a/node/chainSimulator/components/syncedBroadcastNetwork_test.go b/node/chainSimulator/components/syncedBroadcastNetwork_test.go new file mode 100644 index 00000000000..74e061a819a --- /dev/null +++ b/node/chainSimulator/components/syncedBroadcastNetwork_test.go @@ -0,0 +1,303 @@ +package components + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-communication-go/p2p" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/stretchr/testify/assert" +) + +func TestSyncedBroadcastNetwork_BroadcastShouldWorkOn3Peers(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + globalTopic := "global" + oneTwoTopic := "topic_1_2" + oneThreeTopic := "topic_1_3" + twoThreeTopic := "topic_2_3" + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(globalTopic, true) + _ = peer1.RegisterMessageProcessor(globalTopic, "", processor1) + _ = peer1.CreateTopic(oneTwoTopic, true) + _ = peer1.RegisterMessageProcessor(oneTwoTopic, "", processor1) + _ = peer1.CreateTopic(oneThreeTopic, true) + _ = peer1.RegisterMessageProcessor(oneThreeTopic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := createMessageProcessor(t, messages, peer2.ID()) + _ = peer2.CreateTopic(globalTopic, true) + _ = peer2.RegisterMessageProcessor(globalTopic, "", processor2) + _ = peer2.CreateTopic(oneTwoTopic, true) + _ = peer2.RegisterMessageProcessor(oneTwoTopic, "", processor2) + _ = peer2.CreateTopic(twoThreeTopic, true) + _ = peer2.RegisterMessageProcessor(twoThreeTopic, "", processor2) + + peer3, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor3 := createMessageProcessor(t, messages, peer3.ID()) + _ = peer3.CreateTopic(globalTopic, true) + _ = peer3.RegisterMessageProcessor(globalTopic, "", processor3) + _ = peer3.CreateTopic(oneThreeTopic, true) + _ = peer3.RegisterMessageProcessor(oneThreeTopic, "", processor3) + _ = peer3.CreateTopic(twoThreeTopic, true) + _ = peer3.RegisterMessageProcessor(twoThreeTopic, "", processor3) + + globalMessage := []byte("global message") + oneTwoMessage := []byte("1-2 message") + oneThreeMessage := []byte("1-3 message") + twoThreeMessage := []byte("2-3 message") + + peer1.Broadcast(globalTopic, globalMessage) + assert.Equal(t, globalMessage, messages[peer1.ID()][globalTopic]) + assert.Equal(t, globalMessage, messages[peer2.ID()][globalTopic]) + assert.Equal(t, globalMessage, messages[peer3.ID()][globalTopic]) + + peer1.Broadcast(oneTwoTopic, oneTwoMessage) + assert.Equal(t, oneTwoMessage, messages[peer1.ID()][oneTwoTopic]) + assert.Equal(t, oneTwoMessage, messages[peer2.ID()][oneTwoTopic]) + assert.Nil(t, messages[peer3.ID()][oneTwoTopic]) + + peer1.Broadcast(oneThreeTopic, oneThreeMessage) + assert.Equal(t, oneThreeMessage, messages[peer1.ID()][oneThreeTopic]) + assert.Nil(t, 
messages[peer2.ID()][oneThreeTopic]) + assert.Equal(t, oneThreeMessage, messages[peer3.ID()][oneThreeTopic]) + + peer2.Broadcast(twoThreeTopic, twoThreeMessage) + assert.Nil(t, messages[peer1.ID()][twoThreeTopic]) + assert.Equal(t, twoThreeMessage, messages[peer2.ID()][twoThreeTopic]) + assert.Equal(t, twoThreeMessage, messages[peer3.ID()][twoThreeTopic]) +} + +func TestSyncedBroadcastNetwork_BroadcastOnAnUnjoinedTopicShouldDiscardMessage(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + globalTopic := "global" + twoThreeTopic := "topic_2_3" + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(globalTopic, true) + _ = peer1.RegisterMessageProcessor(globalTopic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := createMessageProcessor(t, messages, peer2.ID()) + _ = peer2.CreateTopic(globalTopic, true) + _ = peer2.RegisterMessageProcessor(globalTopic, "", processor2) + _ = peer2.CreateTopic(twoThreeTopic, true) + _ = peer2.RegisterMessageProcessor(twoThreeTopic, "", processor2) + + peer3, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor3 := createMessageProcessor(t, messages, peer3.ID()) + _ = peer3.CreateTopic(globalTopic, true) + _ = peer3.RegisterMessageProcessor(globalTopic, "", processor3) + _ = peer3.CreateTopic(twoThreeTopic, true) + _ = peer3.RegisterMessageProcessor(twoThreeTopic, "", processor3) + + testMessage := []byte("test message") + + peer1.Broadcast(twoThreeTopic, testMessage) + + assert.Nil(t, messages[peer1.ID()][twoThreeTopic]) + assert.Nil(t, messages[peer2.ID()][twoThreeTopic]) + assert.Nil(t, messages[peer3.ID()][twoThreeTopic]) +} + +func TestSyncedBroadcastNetwork_SendDirectlyShouldWorkBetween2peers(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + topic := "topic" + testMessage := []byte("test message") + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(topic, true) + _ = peer1.RegisterMessageProcessor(topic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := createMessageProcessor(t, messages, peer2.ID()) + _ = peer2.CreateTopic(topic, true) + _ = peer2.RegisterMessageProcessor(topic, "", processor2) + + err = peer1.SendToConnectedPeer(topic, testMessage, peer2.ID()) + assert.Nil(t, err) + + assert.Nil(t, messages[peer1.ID()][topic]) + assert.Equal(t, testMessage, messages[peer2.ID()][topic]) +} + +func TestSyncedBroadcastNetwork_SendDirectlyToSelfShouldWork(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + topic := "topic" + testMessage := []byte("test message") + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(topic, true) + _ = peer1.RegisterMessageProcessor(topic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := createMessageProcessor(t, messages, peer2.ID()) + _ = peer2.CreateTopic(topic, true) + _ = peer2.RegisterMessageProcessor(topic, "", processor2) + + err = peer1.SendToConnectedPeer(topic, testMessage, peer1.ID()) + assert.Nil(t, err) + + 
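+ // sending to one's own peer ID goes through the same synchronous receive path, so the message is delivered to peer1 only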
assert.Equal(t, testMessage, messages[peer1.ID()][topic]) + assert.Nil(t, messages[peer2.ID()][topic]) +} + +func TestSyncedBroadcastNetwork_SendDirectlyShouldNotDeadlock(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + topic := "topic" + testMessage := []byte("test message") + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(topic, true) + _ = peer1.RegisterMessageProcessor(topic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := &p2pmocks.MessageProcessorStub{ + ProcessReceivedMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { + log.Debug("sending message back to", "pid", fromConnectedPeer.Pretty()) + return source.SendToConnectedPeer(message.Topic(), []byte("reply: "+string(message.Data())), fromConnectedPeer) + }, + } + _ = peer2.CreateTopic(topic, true) + _ = peer2.RegisterMessageProcessor(topic, "", processor2) + + err = peer1.SendToConnectedPeer(topic, testMessage, peer2.ID()) + assert.Nil(t, err) + + assert.Equal(t, "reply: "+string(testMessage), string(messages[peer1.ID()][topic])) + assert.Nil(t, messages[peer2.ID()][topic]) +} + +func TestSyncedBroadcastNetwork_ConnectedPeersAndAddresses(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + + peers := peer1.ConnectedPeers() + assert.Equal(t, 2, len(peers)) + + assert.Contains(t, peers, peer1.ID()) + assert.Contains(t, peers, peer2.ID()) + + assert.True(t, peer1.IsConnected(peer2.ID())) + assert.True(t, peer2.IsConnected(peer1.ID())) + assert.False(t, peer1.IsConnected("no connection")) + + addresses := peer1.ConnectedAddresses() + assert.Equal(t, 2, len(addresses)) + assert.Contains(t, addresses, fmt.Sprintf(virtualAddressTemplate, peer1.ID().Pretty())) + assert.Contains(t, addresses, peer1.Addresses()[0]) + assert.Contains(t, addresses, fmt.Sprintf(virtualAddressTemplate, peer2.ID().Pretty())) + assert.Contains(t, addresses, peer2.Addresses()[0]) +} + +func TestSyncedBroadcastNetwork_GetConnectedPeersOnTopic(t *testing.T) { + t.Parallel() + + globalTopic := "global" + oneTwoTopic := "topic_1_2" + oneThreeTopic := "topic_1_3" + twoThreeTopic := "topic_2_3" + + network := NewSyncedBroadcastNetwork() + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + _ = peer1.CreateTopic(globalTopic, false) + _ = peer1.CreateTopic(oneTwoTopic, false) + _ = peer1.CreateTopic(oneThreeTopic, false) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + _ = peer2.CreateTopic(globalTopic, false) + _ = peer2.CreateTopic(oneTwoTopic, false) + _ = peer2.CreateTopic(twoThreeTopic, false) + + peer3, err := NewSyncedMessenger(network) + assert.Nil(t, err) + _ = peer3.CreateTopic(globalTopic, false) + _ = peer3.CreateTopic(oneThreeTopic, false) + _ = peer3.CreateTopic(twoThreeTopic, false) + + peers := peer1.ConnectedPeersOnTopic(globalTopic) + assert.Equal(t, 3, len(peers)) + assert.Contains(t, peers, peer1.ID()) + assert.Contains(t, peers, peer2.ID()) + assert.Contains(t, peers, peer3.ID()) + + peers = peer1.ConnectedPeersOnTopic(oneTwoTopic) + assert.Equal(t, 2, len(peers)) + assert.Contains(t, peers, peer1.ID()) + assert.Contains(t, peers, peer2.ID()) + + peers = 
peer3.ConnectedPeersOnTopic(oneThreeTopic) + assert.Equal(t, 2, len(peers)) + assert.Contains(t, peers, peer1.ID()) + assert.Contains(t, peers, peer3.ID()) + + peersInfo := peer1.GetConnectedPeersInfo() + assert.Equal(t, 3, len(peersInfo.UnknownPeers)) +} + +func createMessageProcessor(t *testing.T, dataMap map[core.PeerID]map[string][]byte, pid core.PeerID) p2p.MessageProcessor { + return &p2pmocks.MessageProcessorStub{ + ProcessReceivedMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { + m, found := dataMap[pid] + if !found { + m = make(map[string][]byte) + dataMap[pid] = m + } + + // some interceptors/resolvers require that the peer field should be the same + assert.Equal(t, message.Peer().Bytes(), message.From()) + assert.Equal(t, message.Peer(), fromConnectedPeer) + m[message.Topic()] = message.Data() + + return nil + }, + } +} diff --git a/node/chainSimulator/components/syncedMessenger.go b/node/chainSimulator/components/syncedMessenger.go new file mode 100644 index 00000000000..cc437d02038 --- /dev/null +++ b/node/chainSimulator/components/syncedMessenger.go @@ -0,0 +1,391 @@ +package components + +import ( + "bytes" + "errors" + "fmt" + "sync" + "time" + + "github.com/multiversx/mx-chain-communication-go/p2p" + "github.com/multiversx/mx-chain-communication-go/p2p/libp2p/crypto" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/hashing/blake2b" + logger "github.com/multiversx/mx-chain-logger-go" +) + +const virtualAddressTemplate = "/virtual/p2p/%s" + +var ( + log = logger.GetOrCreate("node/chainSimulator") + p2pInstanceCreator, _ = crypto.NewIdentityGenerator(log) + hasher = blake2b.NewBlake2b() + errNilNetwork = errors.New("nil network") + errTopicAlreadyCreated = errors.New("topic already created") + errNilMessageProcessor = errors.New("nil message processor") + errTopicNotCreated = errors.New("topic not created") + errTopicHasProcessor = errors.New("there is already a message processor for provided topic and identifier") + errInvalidSignature = errors.New("invalid signature") + errMessengerIsClosed = errors.New("messenger is closed") +) + +type syncedMessenger struct { + mutIsClosed sync.RWMutex + isClosed bool + mutOperation sync.RWMutex + topics map[string]map[string]p2p.MessageProcessor + network SyncedBroadcastNetworkHandler + pid core.PeerID +} + +// NewSyncedMessenger creates a new synced network messenger +func NewSyncedMessenger(network SyncedBroadcastNetworkHandler) (*syncedMessenger, error) { + if check.IfNil(network) { + return nil, errNilNetwork + } + + _, pid, err := p2pInstanceCreator.CreateRandomP2PIdentity() + if err != nil { + return nil, err + } + + messenger := &syncedMessenger{ + network: network, + topics: make(map[string]map[string]p2p.MessageProcessor), + pid: pid, + } + + log.Debug("created syncedMessenger", "pid", pid.Pretty()) + + network.RegisterMessageReceiver(messenger, pid) + + return messenger, nil +} + +// HasCompatibleProtocolID returns true +func (messenger *syncedMessenger) HasCompatibleProtocolID(_ string) bool { + return true +} + +func (messenger *syncedMessenger) receive(fromConnectedPeer core.PeerID, message p2p.MessageP2P) { + if messenger.closed() { + return + } + if check.IfNil(message) { + return + } + + messenger.mutOperation.RLock() + handlers := messenger.topics[message.Topic()] + 
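+ // the topic's processor map is looked up under the read lock and the lock is released before the processors run, so a processor can send or broadcast through this messenger without deadlocking (see the dedicated test in syncedBroadcastNetwork_test.go)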
messenger.mutOperation.RUnlock() + + for _, handler := range handlers { + err := handler.ProcessReceivedMessage(message, fromConnectedPeer, messenger) + if err != nil { + log.Trace("received message syncedMessenger", + "error", err, "topic", message.Topic(), "from connected peer", fromConnectedPeer.Pretty()) + } + } +} + +// ProcessReceivedMessage does nothing and returns nil +func (messenger *syncedMessenger) ProcessReceivedMessage(_ p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { + return nil +} + +// CreateTopic will create a topic for receiving data +func (messenger *syncedMessenger) CreateTopic(name string, _ bool) error { + if messenger.closed() { + return errMessengerIsClosed + } + + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + _, found := messenger.topics[name] + if found { + return fmt.Errorf("programming error in syncedMessenger.CreateTopic, %w for topic %s", errTopicAlreadyCreated, name) + } + + messenger.topics[name] = make(map[string]p2p.MessageProcessor) + + return nil +} + +// HasTopic returns true if the topic was registered +func (messenger *syncedMessenger) HasTopic(name string) bool { + messenger.mutOperation.RLock() + defer messenger.mutOperation.RUnlock() + + _, found := messenger.topics[name] + + return found +} + +// RegisterMessageProcessor will try to register a message processor on the provided topic & identifier +func (messenger *syncedMessenger) RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error { + if messenger.closed() { + return errMessengerIsClosed + } + if check.IfNil(handler) { + return fmt.Errorf("programming error in syncedMessenger.RegisterMessageProcessor, "+ + "%w for topic %s and identifier %s", errNilMessageProcessor, topic, identifier) + } + + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + handlers, found := messenger.topics[topic] + if !found { + handlers = make(map[string]p2p.MessageProcessor) + messenger.topics[topic] = handlers + } + + _, found = handlers[identifier] + if found { + return fmt.Errorf("programming error in syncedMessenger.RegisterMessageProcessor, %w, topic %s, identifier %s", + errTopicHasProcessor, topic, identifier) + } + + handlers[identifier] = handler + + return nil +} + +// UnregisterAllMessageProcessors will unregister all message processors +func (messenger *syncedMessenger) UnregisterAllMessageProcessors() error { + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + for topic := range messenger.topics { + messenger.topics[topic] = make(map[string]p2p.MessageProcessor) + } + + return nil +} + +// UnregisterMessageProcessor will unregister the message processor for the provided topic and identifier +func (messenger *syncedMessenger) UnregisterMessageProcessor(topic string, identifier string) error { + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + handlers, found := messenger.topics[topic] + if !found { + return fmt.Errorf("programming error in syncedMessenger.UnregisterMessageProcessor, %w for topic %s", + errTopicNotCreated, topic) + } + + delete(handlers, identifier) + + return nil +} + +// Broadcast will broadcast the provided buffer on the topic in a synchronous manner +func (messenger *syncedMessenger) Broadcast(topic string, buff []byte) { + if messenger.closed() { + return + } + if !messenger.HasTopic(topic) { + return + } + + messenger.network.Broadcast(messenger.pid, topic, buff) +} + +// BroadcastOnChannel calls the Broadcast method +func (messenger 
*syncedMessenger) BroadcastOnChannel(_ string, topic string, buff []byte) { + messenger.Broadcast(topic, buff) +} + +// BroadcastUsingPrivateKey calls the Broadcast method +func (messenger *syncedMessenger) BroadcastUsingPrivateKey(topic string, buff []byte, _ core.PeerID, _ []byte) { + messenger.Broadcast(topic, buff) +} + +// BroadcastOnChannelUsingPrivateKey calls the Broadcast method +func (messenger *syncedMessenger) BroadcastOnChannelUsingPrivateKey(_ string, topic string, buff []byte, _ core.PeerID, _ []byte) { + messenger.Broadcast(topic, buff) +} + +// SendToConnectedPeer will send the message to the peer +func (messenger *syncedMessenger) SendToConnectedPeer(topic string, buff []byte, peerID core.PeerID) error { + if messenger.closed() { + return errMessengerIsClosed + } + + if !messenger.HasTopic(topic) { + return nil + } + + log.Trace("syncedMessenger.SendToConnectedPeer", + "from", messenger.pid.Pretty(), + "to", peerID.Pretty(), + "data", buff) + + return messenger.network.SendDirectly(messenger.pid, topic, buff, peerID) +} + +// UnJoinAllTopics will unjoin all topics +func (messenger *syncedMessenger) UnJoinAllTopics() error { + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + messenger.topics = make(map[string]map[string]p2p.MessageProcessor) + + return nil +} + +// Bootstrap does nothing and returns nil +func (messenger *syncedMessenger) Bootstrap() error { + return nil +} + +// Peers returns the network's peer ID +func (messenger *syncedMessenger) Peers() []core.PeerID { + return messenger.network.GetConnectedPeers() +} + +// Addresses returns the addresses this messenger was bound to. It returns a virtual address +func (messenger *syncedMessenger) Addresses() []string { + return []string{fmt.Sprintf(virtualAddressTemplate, messenger.pid.Pretty())} +} + +// ConnectToPeer does nothing and returns nil +func (messenger *syncedMessenger) ConnectToPeer(_ string) error { + return nil +} + +// IsConnected returns true if the peer ID is found on the network +func (messenger *syncedMessenger) IsConnected(peerID core.PeerID) bool { + peers := messenger.network.GetConnectedPeers() + for _, peer := range peers { + if peer == peerID { + return true + } + } + + return false +} + +// ConnectedPeers returns the same list as the function Peers +func (messenger *syncedMessenger) ConnectedPeers() []core.PeerID { + return messenger.Peers() +} + +// ConnectedAddresses returns all connected addresses +func (messenger *syncedMessenger) ConnectedAddresses() []string { + peers := messenger.network.GetConnectedPeers() + addresses := make([]string, 0, len(peers)) + for _, peer := range peers { + addresses = append(addresses, fmt.Sprintf(virtualAddressTemplate, peer.Pretty())) + } + + return addresses +} + +// PeerAddresses returns the virtual peer address +func (messenger *syncedMessenger) PeerAddresses(pid core.PeerID) []string { + return []string{fmt.Sprintf(virtualAddressTemplate, pid.Pretty())} +} + +// ConnectedPeersOnTopic returns the connected peers on the provided topic +func (messenger *syncedMessenger) ConnectedPeersOnTopic(topic string) []core.PeerID { + return messenger.network.GetConnectedPeersOnTopic(topic) +} + +// SetPeerShardResolver does nothing and returns nil +func (messenger *syncedMessenger) SetPeerShardResolver(_ p2p.PeerShardResolver) error { + return nil +} + +// GetConnectedPeersInfo return current connected peers info +func (messenger *syncedMessenger) GetConnectedPeersInfo() *p2p.ConnectedPeersInfo { + peersInfo := &p2p.ConnectedPeersInfo{} + 
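+ // the synced network performs no peer classification, so every connected peer is reported as unknown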
peers := messenger.network.GetConnectedPeers() + for _, peer := range peers { + peersInfo.UnknownPeers = append(peersInfo.UnknownPeers, peer.Pretty()) + } + + return peersInfo +} + +// WaitForConnections does nothing +func (messenger *syncedMessenger) WaitForConnections(_ time.Duration, _ uint32) { +} + +// IsConnectedToTheNetwork returns true +func (messenger *syncedMessenger) IsConnectedToTheNetwork() bool { + return true +} + +// ThresholdMinConnectedPeers returns 0 +func (messenger *syncedMessenger) ThresholdMinConnectedPeers() int { + return 0 +} + +// SetThresholdMinConnectedPeers does nothing and returns nil +func (messenger *syncedMessenger) SetThresholdMinConnectedPeers(_ int) error { + return nil +} + +// SetPeerDenialEvaluator does nothing and returns nil +func (messenger *syncedMessenger) SetPeerDenialEvaluator(_ p2p.PeerDenialEvaluator) error { + return nil +} + +// ID returns the peer ID +func (messenger *syncedMessenger) ID() core.PeerID { + return messenger.pid +} + +// Port returns 0 +func (messenger *syncedMessenger) Port() int { + return 0 +} + +// Sign will return the hash(messenger.ID + payload) +func (messenger *syncedMessenger) Sign(payload []byte) ([]byte, error) { + return hasher.Compute(messenger.pid.Pretty() + string(payload)), nil +} + +// Verify will check if the provided signature === hash(pid + payload) +func (messenger *syncedMessenger) Verify(payload []byte, pid core.PeerID, signature []byte) error { + sig := hasher.Compute(pid.Pretty() + string(payload)) + if bytes.Equal(sig, signature) { + return nil + } + + return errInvalidSignature +} + +// SignUsingPrivateKey will return an empty byte slice +func (messenger *syncedMessenger) SignUsingPrivateKey(_ []byte, _ []byte) ([]byte, error) { + return make([]byte, 0), nil +} + +// SetDebugger will set the provided debugger +func (messenger *syncedMessenger) SetDebugger(_ p2p.Debugger) error { + return nil +} + +// Close does nothing and returns nil +func (messenger *syncedMessenger) Close() error { + messenger.mutIsClosed.Lock() + messenger.isClosed = true + messenger.mutIsClosed.Unlock() + + return nil +} + +func (messenger *syncedMessenger) closed() bool { + messenger.mutIsClosed.RLock() + defer messenger.mutIsClosed.RUnlock() + + return messenger.isClosed +} + +// IsInterfaceNil returns true if there is no value under the interface +func (messenger *syncedMessenger) IsInterfaceNil() bool { + return messenger == nil +} diff --git a/node/chainSimulator/components/syncedMessenger_test.go b/node/chainSimulator/components/syncedMessenger_test.go new file mode 100644 index 00000000000..c8c17918141 --- /dev/null +++ b/node/chainSimulator/components/syncedMessenger_test.go @@ -0,0 +1,260 @@ +package components + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/stretchr/testify/assert" +) + +func TestNewSyncedMessenger(t *testing.T) { + t.Parallel() + + t.Run("nil network should error", func(t *testing.T) { + t.Parallel() + + messenger, err := NewSyncedMessenger(nil) + assert.Nil(t, messenger) + assert.Equal(t, errNilNetwork, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + messenger, err := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + assert.NotNil(t, messenger) + assert.Nil(t, err) + }) +} + +func TestSyncedMessenger_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var messenger *syncedMessenger + assert.True(t, messenger.IsInterfaceNil()) + + messenger, _ = 
NewSyncedMessenger(NewSyncedBroadcastNetwork()) + assert.False(t, messenger.IsInterfaceNil()) +} + +func TestSyncedMessenger_DisabledMethodsShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, fmt.Sprintf("should have not panicked: %v", r)) + } + }() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + assert.Nil(t, messenger.Close()) + assert.Zero(t, messenger.Port()) + assert.Nil(t, messenger.SetPeerDenialEvaluator(nil)) + assert.Nil(t, messenger.SetThresholdMinConnectedPeers(0)) + assert.Zero(t, messenger.ThresholdMinConnectedPeers()) + assert.True(t, messenger.IsConnectedToTheNetwork()) + assert.Nil(t, messenger.SetPeerShardResolver(nil)) + assert.Nil(t, messenger.ConnectToPeer("")) + assert.Nil(t, messenger.Bootstrap()) + assert.Nil(t, messenger.ProcessReceivedMessage(nil, "", nil)) + + messenger.WaitForConnections(0, 0) + + buff, err := messenger.SignUsingPrivateKey(nil, nil) + assert.Empty(t, buff) + assert.Nil(t, err) +} + +func TestSyncedMessenger_RegisterMessageProcessor(t *testing.T) { + t.Parallel() + + t.Run("nil message processor should error", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + err := messenger.RegisterMessageProcessor("", "", nil) + assert.ErrorIs(t, err, errNilMessageProcessor) + }) + t.Run("processor exists, should error", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + err := messenger.CreateTopic("t", false) + assert.Nil(t, err) + + processor1 := &p2pmocks.MessageProcessorStub{} + err = messenger.RegisterMessageProcessor("t", "i", processor1) + assert.Nil(t, err) + + processor2 := &p2pmocks.MessageProcessorStub{} + err = messenger.RegisterMessageProcessor("t", "i", processor2) + assert.ErrorIs(t, err, errTopicHasProcessor) + + messenger.mutOperation.RLock() + defer messenger.mutOperation.RUnlock() + + assert.True(t, messenger.topics["t"]["i"] == processor1) // pointer testing + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + err := messenger.CreateTopic("t", false) + assert.Nil(t, err) + + processor := &p2pmocks.MessageProcessorStub{} + err = messenger.RegisterMessageProcessor("t", "i", processor) + assert.Nil(t, err) + + messenger.mutOperation.RLock() + defer messenger.mutOperation.RUnlock() + + assert.True(t, messenger.topics["t"]["i"] == processor) // pointer testing + }) +} + +func TestSyncedMessenger_UnregisterAllMessageProcessors(t *testing.T) { + t.Parallel() + + t.Run("no topics should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + + err := messenger.UnregisterAllMessageProcessors() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + }) + t.Run("one topic but no processor should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + topic := "topic" + _ = messenger.CreateTopic(topic, true) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics[topic]) + messenger.mutOperation.RUnlock() + + err := messenger.UnregisterAllMessageProcessors() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics[topic]) + 
messenger.mutOperation.RUnlock() + }) + t.Run("one topic with processor should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + topic := "topic" + identifier := "identifier" + _ = messenger.CreateTopic(topic, true) + _ = messenger.RegisterMessageProcessor(topic, identifier, &p2pmocks.MessageProcessorStub{}) + + messenger.mutOperation.RLock() + assert.NotNil(t, messenger.topics[topic][identifier]) + messenger.mutOperation.RUnlock() + + err := messenger.UnregisterAllMessageProcessors() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics[topic]) + messenger.mutOperation.RUnlock() + }) +} + +func TestSyncedMessenger_UnregisterMessageProcessor(t *testing.T) { + t.Parallel() + + t.Run("topic not found should error", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + topic := "topic" + identifier := "identifier" + err := messenger.UnregisterMessageProcessor(topic, identifier) + assert.ErrorIs(t, err, errTopicNotCreated) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + topic := "topic" + identifier1 := "identifier1" + identifier2 := "identifier2" + + _ = messenger.CreateTopic(topic, true) + _ = messenger.RegisterMessageProcessor(topic, identifier1, &p2pmocks.MessageProcessorStub{}) + _ = messenger.RegisterMessageProcessor(topic, identifier2, &p2pmocks.MessageProcessorStub{}) + + messenger.mutOperation.RLock() + assert.Equal(t, 2, len(messenger.topics[topic])) + assert.NotNil(t, messenger.topics[topic][identifier1]) + assert.NotNil(t, messenger.topics[topic][identifier2]) + messenger.mutOperation.RUnlock() + + err := messenger.UnregisterMessageProcessor(topic, identifier1) + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Equal(t, 1, len(messenger.topics[topic])) + assert.NotNil(t, messenger.topics[topic][identifier2]) + messenger.mutOperation.RUnlock() + }) +} + +func TestSyncedMessenger_UnJoinAllTopics(t *testing.T) { + t.Parallel() + + t.Run("no topics registered should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + + err := messenger.UnJoinAllTopics() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + }) + t.Run("one registered topic should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + topic := "topic" + _ = messenger.CreateTopic(topic, true) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics[topic]) + messenger.mutOperation.RUnlock() + + err := messenger.UnJoinAllTopics() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + }) +} diff --git a/node/chainSimulator/components/syncedTxsSender.go b/node/chainSimulator/components/syncedTxsSender.go new file mode 100644 index 00000000000..9434c72a041 --- /dev/null +++ b/node/chainSimulator/components/syncedTxsSender.go @@ -0,0 +1,110 @@ +package components + +import ( + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/marshal" + 
"github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/sharding" +) + +// ArgsSyncedTxsSender is a holder struct for all necessary arguments to create a NewSyncedTxsSender +type ArgsSyncedTxsSender struct { + Marshaller marshal.Marshalizer + ShardCoordinator sharding.Coordinator + NetworkMessenger NetworkMessenger + DataPacker process.DataPacker +} + +type syncedTxsSender struct { + marshaller marshal.Marshalizer + shardCoordinator sharding.Coordinator + networkMessenger NetworkMessenger + dataPacker process.DataPacker +} + +// NewSyncedTxsSender creates a new instance of syncedTxsSender +func NewSyncedTxsSender(args ArgsSyncedTxsSender) (*syncedTxsSender, error) { + if check.IfNil(args.Marshaller) { + return nil, process.ErrNilMarshalizer + } + if check.IfNil(args.ShardCoordinator) { + return nil, process.ErrNilShardCoordinator + } + if check.IfNil(args.NetworkMessenger) { + return nil, process.ErrNilMessenger + } + if check.IfNil(args.DataPacker) { + return nil, dataRetriever.ErrNilDataPacker + } + + ret := &syncedTxsSender{ + marshaller: args.Marshaller, + shardCoordinator: args.ShardCoordinator, + networkMessenger: args.NetworkMessenger, + dataPacker: args.DataPacker, + } + + return ret, nil +} + +// SendBulkTransactions sends the provided transactions as a bulk, optimizing transfer between nodes +func (sender *syncedTxsSender) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { + if len(txs) == 0 { + return 0, process.ErrNoTxToProcess + } + + sender.sendBulkTransactions(txs) + + return uint64(len(txs)), nil +} + +func (sender *syncedTxsSender) sendBulkTransactions(txs []*transaction.Transaction) { + transactionsByShards := make(map[uint32][][]byte) + for _, tx := range txs { + marshalledTx, err := sender.marshaller.Marshal(tx) + if err != nil { + log.Warn("txsSender.sendBulkTransactions", + "marshaller error", err, + ) + continue + } + + senderShardId := sender.shardCoordinator.ComputeId(tx.SndAddr) + transactionsByShards[senderShardId] = append(transactionsByShards[senderShardId], marshalledTx) + } + + for shardId, txsForShard := range transactionsByShards { + err := sender.sendBulkTransactionsFromShard(txsForShard, shardId) + log.LogIfError(err) + } +} + +func (sender *syncedTxsSender) sendBulkTransactionsFromShard(transactions [][]byte, senderShardId uint32) error { + // the topic identifier is made of the current shard id and sender's shard id + identifier := factory.TransactionTopic + sender.shardCoordinator.CommunicationIdentifier(senderShardId) + + packets, err := sender.dataPacker.PackDataInChunks(transactions, common.MaxBulkTransactionSize) + if err != nil { + return err + } + + for _, buff := range packets { + sender.networkMessenger.Broadcast(identifier, buff) + } + + return nil +} + +// Close returns nil +func (sender *syncedTxsSender) Close() error { + return nil +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (sender *syncedTxsSender) IsInterfaceNil() bool { + return sender == nil +} diff --git a/node/chainSimulator/components/syncedTxsSender_test.go b/node/chainSimulator/components/syncedTxsSender_test.go new file mode 100644 index 00000000000..9af295c47cf --- /dev/null +++ b/node/chainSimulator/components/syncedTxsSender_test.go @@ -0,0 +1,211 @@ +package components + 
+import ( + "fmt" + "strings" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/mock" + "github.com/multiversx/mx-chain-go/process" + processMock "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/stretchr/testify/assert" +) + +func createMockSyncedTxsSenderArgs() ArgsSyncedTxsSender { + return ArgsSyncedTxsSender{ + Marshaller: &marshallerMock.MarshalizerMock{}, + ShardCoordinator: testscommon.NewMultiShardsCoordinatorMock(3), + NetworkMessenger: &p2pmocks.MessengerStub{}, + DataPacker: &mock.DataPackerStub{}, + } +} + +func TestNewSyncedTxsSender(t *testing.T) { + t.Parallel() + + t.Run("nil marshaller should error", func(t *testing.T) { + t.Parallel() + + args := createMockSyncedTxsSenderArgs() + args.Marshaller = nil + sender, err := NewSyncedTxsSender(args) + + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, sender) + }) + t.Run("nil shard coordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockSyncedTxsSenderArgs() + args.ShardCoordinator = nil + sender, err := NewSyncedTxsSender(args) + + assert.Equal(t, process.ErrNilShardCoordinator, err) + assert.Nil(t, sender) + }) + t.Run("nil network messenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockSyncedTxsSenderArgs() + args.NetworkMessenger = nil + sender, err := NewSyncedTxsSender(args) + + assert.Equal(t, process.ErrNilMessenger, err) + assert.Nil(t, sender) + }) + t.Run("nil data packer should error", func(t *testing.T) { + t.Parallel() + + args := createMockSyncedTxsSenderArgs() + args.DataPacker = nil + sender, err := NewSyncedTxsSender(args) + + assert.Equal(t, dataRetriever.ErrNilDataPacker, err) + assert.Nil(t, sender) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := createMockSyncedTxsSenderArgs() + sender, err := NewSyncedTxsSender(args) + + assert.Nil(t, err) + assert.NotNil(t, sender) + }) +} + +func TestSyncedTxsSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var instance *syncedTxsSender + assert.True(t, instance.IsInterfaceNil()) + + instance = &syncedTxsSender{} + assert.False(t, instance.IsInterfaceNil()) +} + +func TestSyncedTxsSender_Close(t *testing.T) { + t.Parallel() + + args := createMockSyncedTxsSenderArgs() + sender, _ := NewSyncedTxsSender(args) + + err := sender.Close() + assert.Nil(t, err) +} + +func TestSyncedTxsSender_SendBulkTransactions(t *testing.T) { + t.Parallel() + + senderAShard0 := []byte("sender A shard 0") + senderBShard1 := []byte("sender B shard 1") + senderCShard0 := []byte("sender C shard 0") + senderDShard1 := []byte("sender D shard 1") + testTransactions := []*transaction.Transaction{ + { + SndAddr: senderAShard0, + }, + { + SndAddr: senderBShard1, + }, + { + SndAddr: senderCShard0, + }, + { + SndAddr: senderDShard1, + }, + } + marshaller := &marshallerMock.MarshalizerMock{} + + marshalledTxs := make([][]byte, 0, len(testTransactions)) + for _, tx := range testTransactions { + buff, _ := marshaller.Marshal(tx) + marshalledTxs = append(marshalledTxs, buff) + } + 
+ mockShardCoordinator := &processMock.ShardCoordinatorStub{ + ComputeIdCalled: func(address []byte) uint32 { + addrString := string(address) + if strings.Contains(addrString, "shard 0") { + return 0 + } + if strings.Contains(addrString, "shard 1") { + return 1 + } + + return core.MetachainShardId + }, + SelfIdCalled: func() uint32 { + return 1 + }, + CommunicationIdentifierCalled: func(destShardID uint32) string { + if destShardID == 1 { + return "_1" + } + if destShardID < 1 { + return fmt.Sprintf("_%d_1", destShardID) + } + + return fmt.Sprintf("_1_%d", destShardID) + }, + } + sentData := make(map[string][][]byte) + netMessenger := &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + sentData[topic] = append(sentData[topic], buff) + }, + } + mockDataPacker := &mock.DataPackerStub{ + PackDataInChunksCalled: func(data [][]byte, limit int) ([][]byte, error) { + return data, nil + }, + } + + t.Run("no transactions provided should error", func(t *testing.T) { + t.Parallel() + + args := createMockSyncedTxsSenderArgs() + sender, _ := NewSyncedTxsSender(args) + + num, err := sender.SendBulkTransactions(nil) + assert.Equal(t, process.ErrNoTxToProcess, err) + assert.Zero(t, num) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := ArgsSyncedTxsSender{ + Marshaller: marshaller, + ShardCoordinator: mockShardCoordinator, + NetworkMessenger: netMessenger, + DataPacker: mockDataPacker, + } + sender, _ := NewSyncedTxsSender(args) + + num, err := sender.SendBulkTransactions(testTransactions) + assert.Nil(t, err) + assert.Equal(t, uint64(4), num) + + expectedSentSliceForShard0 := make([][]byte, 0) + expectedSentSliceForShard0 = append(expectedSentSliceForShard0, marshalledTxs[0]) + expectedSentSliceForShard0 = append(expectedSentSliceForShard0, marshalledTxs[2]) + + expectedSentSliceForShard1 := make([][]byte, 0) + expectedSentSliceForShard1 = append(expectedSentSliceForShard1, marshalledTxs[1]) + expectedSentSliceForShard1 = append(expectedSentSliceForShard1, marshalledTxs[3]) + + expectedSentMap := map[string][][]byte{ + "transactions_1": expectedSentSliceForShard1, + "transactions_0_1": expectedSentSliceForShard0, + } + + assert.Equal(t, expectedSentMap, sentData) + + }) +} diff --git a/node/chainSimulator/components/testOnlyProcessingNode.go b/node/chainSimulator/components/testOnlyProcessingNode.go new file mode 100644 index 00000000000..28256c4820f --- /dev/null +++ b/node/chainSimulator/components/testOnlyProcessingNode.go @@ -0,0 +1,623 @@ +package components + +import ( + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "math/big" + + "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + dataRetrieverFactory "github.com/multiversx/mx-chain-go/dataRetriever/factory" + "github.com/multiversx/mx-chain-go/facade" + "github.com/multiversx/mx-chain-go/factory" + bootstrapComp "github.com/multiversx/mx-chain-go/factory/bootstrap" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/process" + 
"github.com/multiversx/mx-chain-go/process/block/postprocess" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + + "github.com/multiversx/mx-chain-core-go/core" + chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/endProcess" +) + +// ArgsTestOnlyProcessingNode represents the DTO struct for the NewTestOnlyProcessingNode constructor function +type ArgsTestOnlyProcessingNode struct { + Configs config.Configs + APIInterface APIConfigurator + + ChanStopNodeProcess chan endProcess.ArgEndProcess + SyncedBroadcastNetwork SyncedBroadcastNetworkHandler + + InitialRound int64 + InitialNonce uint64 + GasScheduleFilename string + NumShards uint32 + ShardIDStr string + BypassTxSignatureCheck bool + MinNodesPerShard uint32 + ConsensusGroupSize uint32 + MinNodesMeta uint32 + MetaChainConsensusGroupSize uint32 + RoundDurationInMillis uint64 + VmQueryDelayAfterStartInMs uint64 +} + +type testOnlyProcessingNode struct { + closeHandler *closeHandler + CoreComponentsHolder factory.CoreComponentsHandler + StatusCoreComponents factory.StatusCoreComponentsHandler + StateComponentsHolder factory.StateComponentsHandler + StatusComponentsHolder factory.StatusComponentsHandler + CryptoComponentsHolder factory.CryptoComponentsHandler + NetworkComponentsHolder factory.NetworkComponentsHandler + BootstrapComponentsHolder factory.BootstrapComponentsHandler + ProcessComponentsHolder factory.ProcessComponentsHandler + DataComponentsHolder factory.DataComponentsHandler + + NodesCoordinator nodesCoordinator.NodesCoordinator + ChainHandler chainData.ChainHandler + ArgumentsParser process.ArgumentsParser + TransactionFeeHandler process.TransactionFeeHandler + StoreService dataRetriever.StorageService + DataPool dataRetriever.PoolsHolder + broadcastMessenger consensus.BroadcastMessenger + + httpServer shared.UpgradeableHttpServerHandler + facadeHandler shared.FacadeHandler +} + +// NewTestOnlyProcessingNode creates a new instance of a node that is able to only process transactions +func NewTestOnlyProcessingNode(args ArgsTestOnlyProcessingNode) (*testOnlyProcessingNode, error) { + instance := &testOnlyProcessingNode{ + ArgumentsParser: smartContract.NewArgumentParser(), + StoreService: CreateStore(args.NumShards), + closeHandler: NewCloseHandler(), + } + + var err error + instance.TransactionFeeHandler = postprocess.NewFeeAccumulator() + + instance.CoreComponentsHolder, err = CreateCoreComponents(ArgsCoreComponentsHolder{ + Config: *args.Configs.GeneralConfig, + EnableEpochsConfig: args.Configs.EpochConfig.EnableEpochs, + RoundsConfig: *args.Configs.RoundConfig, + EconomicsConfig: *args.Configs.EconomicsConfig, + ChanStopNodeProcess: args.ChanStopNodeProcess, + NumShards: args.NumShards, + WorkingDir: args.Configs.FlagsConfig.WorkingDir, + GasScheduleFilename: args.GasScheduleFilename, + NodesSetupPath: args.Configs.ConfigurationPathsHolder.Nodes, + InitialRound: args.InitialRound, + MinNodesPerShard: args.MinNodesPerShard, + ConsensusGroupSize: args.ConsensusGroupSize, + MinNodesMeta: args.MinNodesMeta, + MetaChainConsensusGroupSize: args.MetaChainConsensusGroupSize, + RoundDurationInMs: args.RoundDurationInMillis, + RatingConfig: *args.Configs.RatingsConfig, + }) + if err != 
nil { + return nil, err + } + + instance.StatusCoreComponents, err = CreateStatusCoreComponents(args.Configs, instance.CoreComponentsHolder) + if err != nil { + return nil, err + } + + instance.CryptoComponentsHolder, err = CreateCryptoComponents(ArgsCryptoComponentsHolder{ + Config: *args.Configs.GeneralConfig, + EnableEpochsConfig: args.Configs.EpochConfig.EnableEpochs, + Preferences: *args.Configs.PreferencesConfig, + CoreComponentsHolder: instance.CoreComponentsHolder, + BypassTxSignatureCheck: args.BypassTxSignatureCheck, + AllValidatorKeysPemFileName: args.Configs.ConfigurationPathsHolder.AllValidatorKeys, + }) + if err != nil { + return nil, err + } + + instance.NetworkComponentsHolder, err = CreateNetworkComponents(args.SyncedBroadcastNetwork) + if err != nil { + return nil, err + } + + instance.BootstrapComponentsHolder, err = CreateBootstrapComponents(ArgsBootstrapComponentsHolder{ + CoreComponents: instance.CoreComponentsHolder, + CryptoComponents: instance.CryptoComponentsHolder, + NetworkComponents: instance.NetworkComponentsHolder, + StatusCoreComponents: instance.StatusCoreComponents, + WorkingDir: args.Configs.FlagsConfig.WorkingDir, + FlagsConfig: *args.Configs.FlagsConfig, + ImportDBConfig: *args.Configs.ImportDbConfig, + PrefsConfig: *args.Configs.PreferencesConfig, + Config: *args.Configs.GeneralConfig, + ShardIDStr: args.ShardIDStr, + }) + if err != nil { + return nil, err + } + + selfShardID := instance.GetShardCoordinator().SelfId() + instance.StatusComponentsHolder, err = CreateStatusComponents( + selfShardID, + instance.StatusCoreComponents.AppStatusHandler(), + args.Configs.GeneralConfig.GeneralSettings.StatusPollingIntervalSec, + *args.Configs.ExternalConfig, + instance.CoreComponentsHolder, + ) + if err != nil { + return nil, err + } + + err = instance.createBlockChain(selfShardID) + if err != nil { + return nil, err + } + + instance.StateComponentsHolder, err = CreateStateComponents(ArgsStateComponents{ + Config: *args.Configs.GeneralConfig, + CoreComponents: instance.CoreComponentsHolder, + StatusCore: instance.StatusCoreComponents, + StoreService: instance.StoreService, + ChainHandler: instance.ChainHandler, + }) + if err != nil { + return nil, err + } + + err = instance.createDataPool(args) + if err != nil { + return nil, err + } + err = instance.createNodesCoordinator(args.Configs.PreferencesConfig.Preferences, *args.Configs.GeneralConfig) + if err != nil { + return nil, err + } + + instance.DataComponentsHolder, err = CreateDataComponents(ArgsDataComponentsHolder{ + Chain: instance.ChainHandler, + StorageService: instance.StoreService, + DataPool: instance.DataPool, + InternalMarshaller: instance.CoreComponentsHolder.InternalMarshalizer(), + }) + if err != nil { + return nil, err + } + + instance.ProcessComponentsHolder, err = CreateProcessComponents(ArgsProcessComponentsHolder{ + CoreComponents: instance.CoreComponentsHolder, + CryptoComponents: instance.CryptoComponentsHolder, + NetworkComponents: instance.NetworkComponentsHolder, + BootstrapComponents: instance.BootstrapComponentsHolder, + StateComponents: instance.StateComponentsHolder, + StatusComponents: instance.StatusComponentsHolder, + StatusCoreComponents: instance.StatusCoreComponents, + FlagsConfig: *args.Configs.FlagsConfig, + ImportDBConfig: *args.Configs.ImportDbConfig, + PrefsConfig: *args.Configs.PreferencesConfig, + Config: *args.Configs.GeneralConfig, + EconomicsConfig: *args.Configs.EconomicsConfig, + SystemSCConfig: *args.Configs.SystemSCConfig, + EpochConfig: 
*args.Configs.EpochConfig, + RoundConfig: *args.Configs.RoundConfig, + ConfigurationPathsHolder: *args.Configs.ConfigurationPathsHolder, + NodesCoordinator: instance.NodesCoordinator, + DataComponents: instance.DataComponentsHolder, + GenesisNonce: args.InitialNonce, + GenesisRound: uint64(args.InitialRound), + }) + if err != nil { + return nil, err + } + + err = instance.StatusComponentsHolder.SetForkDetector(instance.ProcessComponentsHolder.ForkDetector()) + if err != nil { + return nil, err + } + + err = instance.StatusComponentsHolder.StartPolling() + if err != nil { + return nil, err + } + + err = instance.createBroadcastMessenger() + if err != nil { + return nil, err + } + + err = instance.createFacade(args.Configs, args.APIInterface, args.VmQueryDelayAfterStartInMs) + if err != nil { + return nil, err + } + + err = instance.createHttpServer(args.Configs) + if err != nil { + return nil, err + } + + instance.collectClosableComponents(args.APIInterface) + + return instance, nil +} + +func (node *testOnlyProcessingNode) createBlockChain(selfShardID uint32) error { + var err error + if selfShardID == core.MetachainShardId { + node.ChainHandler, err = blockchain.NewMetaChain(node.StatusCoreComponents.AppStatusHandler()) + } else { + node.ChainHandler, err = blockchain.NewBlockChain(node.StatusCoreComponents.AppStatusHandler()) + } + + return err +} + +func (node *testOnlyProcessingNode) createDataPool(args ArgsTestOnlyProcessingNode) error { + var err error + + argsDataPool := dataRetrieverFactory.ArgsDataPool{ + Config: args.Configs.GeneralConfig, + EconomicsData: node.CoreComponentsHolder.EconomicsData(), + ShardCoordinator: node.BootstrapComponentsHolder.ShardCoordinator(), + Marshalizer: node.CoreComponentsHolder.InternalMarshalizer(), + PathManager: node.CoreComponentsHolder.PathHandler(), + } + + node.DataPool, err = dataRetrieverFactory.NewDataPoolFromConfig(argsDataPool) + + return err +} + +func (node *testOnlyProcessingNode) createNodesCoordinator(pref config.PreferencesConfig, generalConfig config.Config) error { + nodesShufflerOut, err := bootstrapComp.CreateNodesShuffleOut( + node.CoreComponentsHolder.GenesisNodesSetup(), + generalConfig.EpochStartConfig, + node.CoreComponentsHolder.ChanStopNodeProcess(), + ) + if err != nil { + return err + } + + bootstrapStorer, err := node.StoreService.GetStorer(dataRetriever.BootstrapUnit) + if err != nil { + return err + } + + shardID := node.BootstrapComponentsHolder.ShardCoordinator().SelfId() + shardIDStr := fmt.Sprintf("%d", shardID) + if shardID == core.MetachainShardId { + shardIDStr = "metachain" + } + + pref.DestinationShardAsObserver = shardIDStr + + node.NodesCoordinator, err = bootstrapComp.CreateNodesCoordinator( + nodesShufflerOut, + node.CoreComponentsHolder.GenesisNodesSetup(), + pref, + node.CoreComponentsHolder.EpochStartNotifierWithConfirm(), + node.CryptoComponentsHolder.PublicKey(), + node.CoreComponentsHolder.InternalMarshalizer(), + node.CoreComponentsHolder.Hasher(), + node.CoreComponentsHolder.Rater(), + bootstrapStorer, + node.CoreComponentsHolder.NodesShuffler(), + node.BootstrapComponentsHolder.ShardCoordinator().SelfId(), + node.BootstrapComponentsHolder.EpochBootstrapParams(), + node.BootstrapComponentsHolder.EpochBootstrapParams().Epoch(), + node.CoreComponentsHolder.ChanStopNodeProcess(), + node.CoreComponentsHolder.NodeTypeProvider(), + node.CoreComponentsHolder.EnableEpochsHandler(), + node.DataPool.CurrentEpochValidatorInfo(), + node.BootstrapComponentsHolder.NodesCoordinatorRegistryFactory(), + ) + if 
err != nil { + return err + } + + return nil +} + +func (node *testOnlyProcessingNode) createBroadcastMessenger() error { + broadcastMessenger, err := sposFactory.GetBroadcastMessenger( + node.CoreComponentsHolder.InternalMarshalizer(), + node.CoreComponentsHolder.Hasher(), + node.NetworkComponentsHolder.NetworkMessenger(), + node.ProcessComponentsHolder.ShardCoordinator(), + node.CryptoComponentsHolder.PeerSignatureHandler(), + node.DataComponentsHolder.Datapool().Headers(), + node.ProcessComponentsHolder.InterceptorsContainer(), + node.CoreComponentsHolder.AlarmScheduler(), + node.CryptoComponentsHolder.KeysHandler(), + ) + if err != nil { + return err + } + + node.broadcastMessenger, err = NewInstantBroadcastMessenger(broadcastMessenger, node.BootstrapComponentsHolder.ShardCoordinator()) + return err +} + +// GetProcessComponents will return the process components +func (node *testOnlyProcessingNode) GetProcessComponents() factory.ProcessComponentsHolder { + return node.ProcessComponentsHolder +} + +// GetChainHandler will return the chain handler +func (node *testOnlyProcessingNode) GetChainHandler() chainData.ChainHandler { + return node.ChainHandler +} + +// GetBroadcastMessenger will return the broadcast messenger +func (node *testOnlyProcessingNode) GetBroadcastMessenger() consensus.BroadcastMessenger { + return node.broadcastMessenger +} + +// GetShardCoordinator will return the shard coordinator +func (node *testOnlyProcessingNode) GetShardCoordinator() sharding.Coordinator { + return node.BootstrapComponentsHolder.ShardCoordinator() +} + +// GetCryptoComponents will return the crypto components +func (node *testOnlyProcessingNode) GetCryptoComponents() factory.CryptoComponentsHolder { + return node.CryptoComponentsHolder +} + +// GetCoreComponents will return the core components +func (node *testOnlyProcessingNode) GetCoreComponents() factory.CoreComponentsHolder { + return node.CoreComponentsHolder +} + +// GetDataComponents will return the data components +func (node *testOnlyProcessingNode) GetDataComponents() factory.DataComponentsHolder { + return node.DataComponentsHolder +} + +// GetStateComponents will return the state components +func (node *testOnlyProcessingNode) GetStateComponents() factory.StateComponentsHolder { + return node.StateComponentsHolder +} + +// GetFacadeHandler will return the facade handler +func (node *testOnlyProcessingNode) GetFacadeHandler() shared.FacadeHandler { + return node.facadeHandler +} + +// GetStatusCoreComponents will return the status core components +func (node *testOnlyProcessingNode) GetStatusCoreComponents() factory.StatusCoreComponentsHolder { + return node.StatusCoreComponents +} + +func (node *testOnlyProcessingNode) collectClosableComponents(apiInterface APIConfigurator) { + node.closeHandler.AddComponent(node.ProcessComponentsHolder) + node.closeHandler.AddComponent(node.DataComponentsHolder) + node.closeHandler.AddComponent(node.StateComponentsHolder) + node.closeHandler.AddComponent(node.StatusComponentsHolder) + node.closeHandler.AddComponent(node.BootstrapComponentsHolder) + node.closeHandler.AddComponent(node.NetworkComponentsHolder) + node.closeHandler.AddComponent(node.StatusCoreComponents) + node.closeHandler.AddComponent(node.CoreComponentsHolder) + node.closeHandler.AddComponent(node.facadeHandler) + + // TODO remove this after http server fix + shardID := node.GetShardCoordinator().SelfId() + if facade.DefaultRestPortOff != apiInterface.RestApiInterface(shardID) { + node.closeHandler.AddComponent(node.httpServer) + } 
+}
+
+// SetKeyValueForAddress will set the provided state for the given address
+func (node *testOnlyProcessingNode) SetKeyValueForAddress(address []byte, keyValueMap map[string]string) error {
+	userAccount, err := node.getUserAccount(address)
+	if err != nil {
+		return err
+	}
+
+	err = setKeyValueMap(userAccount, keyValueMap)
+	if err != nil {
+		return err
+	}
+
+	accountsAdapter := node.StateComponentsHolder.AccountsAdapter()
+	err = accountsAdapter.SaveAccount(userAccount)
+	if err != nil {
+		return err
+	}
+
+	_, err = accountsAdapter.Commit()
+
+	return err
+}
+
+func setKeyValueMap(userAccount state.UserAccountHandler, keyValueMap map[string]string) error {
+	for keyHex, valueHex := range keyValueMap {
+		keyDecoded, err := hex.DecodeString(keyHex)
+		if err != nil {
+			return fmt.Errorf("cannot decode key, error: %w", err)
+		}
+		valueDecoded, err := hex.DecodeString(valueHex)
+		if err != nil {
+			return fmt.Errorf("cannot decode value, error: %w", err)
+		}
+
+		err = userAccount.SaveKeyValue(keyDecoded, valueDecoded)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// SetStateForAddress will set the state for the given address
+func (node *testOnlyProcessingNode) SetStateForAddress(address []byte, addressState *dtos.AddressState) error {
+	userAccount, err := node.getUserAccount(address)
+	if err != nil {
+		return err
+	}
+
+	err = setNonceAndBalanceForAccount(userAccount, addressState.Nonce, addressState.Balance)
+	if err != nil {
+		return err
+	}
+
+	err = setKeyValueMap(userAccount, addressState.Pairs)
+	if err != nil {
+		return err
+	}
+
+	err = node.setScDataIfNeeded(address, userAccount, addressState)
+	if err != nil {
+		return err
+	}
+
+	rootHash, err := base64.StdEncoding.DecodeString(addressState.RootHash)
+	if err != nil {
+		return err
+	}
+	if len(rootHash) != 0 {
+		userAccount.SetRootHash(rootHash)
+	}
+
+	accountsAdapter := node.StateComponentsHolder.AccountsAdapter()
+	err = accountsAdapter.SaveAccount(userAccount)
+	if err != nil {
+		return err
+	}
+
+	_, err = accountsAdapter.Commit()
+	return err
+}
+
+// RemoveAccount will remove the account for the given address
+func (node *testOnlyProcessingNode) RemoveAccount(address []byte) error {
+	accountsAdapter := node.StateComponentsHolder.AccountsAdapter()
+	err := accountsAdapter.RemoveAccount(address)
+	if err != nil {
+		return err
+	}
+
+	_, err = accountsAdapter.Commit()
+	return err
+}
+
+// ForceChangeOfEpoch will force an epoch change
+func (node *testOnlyProcessingNode) ForceChangeOfEpoch() error {
+	currentHeader := node.DataComponentsHolder.Blockchain().GetCurrentBlockHeader()
+	if currentHeader == nil {
+		currentHeader = node.DataComponentsHolder.Blockchain().GetGenesisHeader()
+	}
+
+	node.ProcessComponentsHolder.EpochStartTrigger().ForceEpochStart(currentHeader.GetRound() + 1)
+
+	return nil
+}
+
+func setNonceAndBalanceForAccount(userAccount state.UserAccountHandler, nonce *uint64, balance string) error {
+	if nonce != nil {
+		// set nonce to zero
+		userAccount.IncreaseNonce(-userAccount.GetNonce())
+		// set nonce with the provided value
+		userAccount.IncreaseNonce(*nonce)
+	}
+
+	if balance == "" {
+		return nil
+	}
+
+	providedBalance, ok := big.NewInt(0).SetString(balance, 10)
+	if !ok {
+		return errors.New("cannot convert string balance to *big.Int")
+	}
+
+	// set balance to zero
+	userBalance := userAccount.GetBalance()
+	err := userAccount.AddToBalance(userBalance.Neg(userBalance))
+	if err != nil {
+		return err
+	}
+	// set provided balance
+	return userAccount.AddToBalance(providedBalance)
+} + +func (node *testOnlyProcessingNode) setScDataIfNeeded(address []byte, userAccount state.UserAccountHandler, addressState *dtos.AddressState) error { + if !core.IsSmartContractAddress(address) { + return nil + } + + if addressState.Code != "" { + decodedCode, err := hex.DecodeString(addressState.Code) + if err != nil { + return err + } + userAccount.SetCode(decodedCode) + } + + if addressState.CodeHash != "" { + codeHash, errD := base64.StdEncoding.DecodeString(addressState.CodeHash) + if errD != nil { + return errD + } + userAccount.SetCodeHash(codeHash) + } + + if addressState.CodeMetadata != "" { + decodedCodeMetadata, errD := base64.StdEncoding.DecodeString(addressState.CodeMetadata) + if errD != nil { + return errD + } + userAccount.SetCodeMetadata(decodedCodeMetadata) + } + + if addressState.Owner != "" { + ownerAddress, errD := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(addressState.Owner) + if errD != nil { + return errD + } + userAccount.SetOwnerAddress(ownerAddress) + } + + if addressState.DeveloperRewards != "" { + developerRewards, ok := big.NewInt(0).SetString(addressState.DeveloperRewards, 10) + if !ok { + return errors.New("cannot convert string developer rewards to *big.Int") + } + userAccount.AddToDeveloperReward(developerRewards) + } + + return nil +} + +func (node *testOnlyProcessingNode) getUserAccount(address []byte) (state.UserAccountHandler, error) { + accountsAdapter := node.StateComponentsHolder.AccountsAdapter() + account, err := accountsAdapter.LoadAccount(address) + if err != nil { + return nil, err + } + + userAccount, ok := account.(state.UserAccountHandler) + if !ok { + return nil, errors.New("cannot cast AccountHandler to UserAccountHandler") + } + + return userAccount, nil +} + +// Close will call the Close methods on all inner components +func (node *testOnlyProcessingNode) Close() error { + return node.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (node *testOnlyProcessingNode) IsInterfaceNil() bool { + return node == nil +} diff --git a/node/chainSimulator/components/testOnlyProcessingNode_test.go b/node/chainSimulator/components/testOnlyProcessingNode_test.go new file mode 100644 index 00000000000..c363ca8019c --- /dev/null +++ b/node/chainSimulator/components/testOnlyProcessingNode_test.go @@ -0,0 +1,473 @@ +package components + +import ( + "errors" + "math/big" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/state" + + "github.com/multiversx/mx-chain-core-go/data/endProcess" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") + +func createMockArgsTestOnlyProcessingNode(t *testing.T) ArgsTestOnlyProcessingNode { + outputConfigs, err := configs.CreateChainSimulatorConfigs(configs.ArgsChainSimulatorConfigs{ + NumOfShards: 3, + OriginalConfigsPath: "../../../cmd/node/config/", + GenesisTimeStamp: 0, + RoundDurationInMillis: 6000, + TempDir: 
t.TempDir(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + ConsensusGroupSize: 1, + MetaChainConsensusGroupSize: 1, + }) + require.Nil(t, err) + + return ArgsTestOnlyProcessingNode{ + Configs: outputConfigs.Configs, + GasScheduleFilename: outputConfigs.GasScheduleFilename, + NumShards: 3, + + SyncedBroadcastNetwork: NewSyncedBroadcastNetwork(), + ChanStopNodeProcess: make(chan endProcess.ArgEndProcess), + APIInterface: api.NewNoApiInterface(), + ShardIDStr: "0", + RoundDurationInMillis: 6000, + MinNodesMeta: 1, + MinNodesPerShard: 1, + ConsensusGroupSize: 1, + MetaChainConsensusGroupSize: 1, + } +} + +func TestNewTestOnlyProcessingNode(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("should work", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + node, err := NewTestOnlyProcessingNode(args) + assert.Nil(t, err) + assert.NotNil(t, node) + }) + t.Run("try commit a block", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + node, err := NewTestOnlyProcessingNode(args) + assert.Nil(t, err) + assert.NotNil(t, node) + + newHeader, err := node.ProcessComponentsHolder.BlockProcessor().CreateNewHeader(1, 1) + assert.Nil(t, err) + + err = newHeader.SetPrevHash(node.ChainHandler.GetGenesisHeaderHash()) + assert.Nil(t, err) + + header, block, err := node.ProcessComponentsHolder.BlockProcessor().CreateBlock(newHeader, func() bool { + return true + }) + assert.Nil(t, err) + require.NotNil(t, header) + require.NotNil(t, block) + + err = node.ProcessComponentsHolder.BlockProcessor().ProcessBlock(header, block, func() time.Duration { + return 1000 + }) + assert.Nil(t, err) + + err = node.ProcessComponentsHolder.BlockProcessor().CommitBlock(header, block) + assert.Nil(t, err) + }) + t.Run("CreateCoreComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.GeneralConfig.Marshalizer.Type = "invalid type" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateCryptoComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.GeneralConfig.PublicKeyPIDSignature.Type = "invalid type" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateNetworkComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.SyncedBroadcastNetwork = nil + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateBootstrapComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.FlagsConfig.WorkingDir = "" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateStateComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.ShardIDStr = common.MetachainShardName // coverage only + args.Configs.GeneralConfig.StateTriesConfig.MaxStateTrieLevelInMemory = 0 + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateProcessComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.FlagsConfig.Version = "" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("createFacade 
failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.EpochConfig.GasSchedule.GasScheduleByEpochs = nil + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) +} + +func TestTestOnlyProcessingNode_SetKeyValueForAddress(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + goodKeyValueMap := map[string]string{ + "01": "02", + } + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, err) + + address := "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj" + addressBytes, _ := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(address) + + t.Run("should work", func(t *testing.T) { + _, err = node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "account was not found")) + + err = node.SetKeyValueForAddress(addressBytes, goodKeyValueMap) + require.NoError(t, err) + + _, err = node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.NoError(t, err) + }) + t.Run("decode key failure should error", func(t *testing.T) { + keyValueMap := map[string]string{ + "nonHex": "01", + } + err = node.SetKeyValueForAddress(addressBytes, keyValueMap) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "cannot decode key")) + }) + t.Run("decode value failure should error", func(t *testing.T) { + keyValueMap := map[string]string{ + "01": "nonHex", + } + err = node.SetKeyValueForAddress(addressBytes, keyValueMap) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "cannot decode value")) + }) + t.Run("LoadAccount failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, nil) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("account un-castable to UserAccountHandler should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.PeerAccountHandlerMock{}, nil + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, nil) + require.Error(t, errLocal) + require.Equal(t, "cannot cast AccountHandler to UserAccountHandler", errLocal.Error()) + }) + t.Run("SaveKeyValue failure should error", func(t *testing.T) { + nodeLocal, errLocal := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.UserAccountStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + return expectedErr + }, + }, nil + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, goodKeyValueMap) 
+ require.Equal(t, expectedErr, errLocal) + }) + t.Run("SaveAccount failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + SaveAccountCalled: func(account vmcommon.AccountHandler) error { + return expectedErr + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, goodKeyValueMap) + require.Equal(t, expectedErr, errLocal) + }) +} + +func TestTestOnlyProcessingNode_SetStateForAddress(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, err) + nonce := uint64(100) + + address := "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj" + scAddress := "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + addressBytes, _ := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(address) + scAddressBytes, _ := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(scAddress) + addressState := &dtos.AddressState{ + Address: "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj", + Nonce: &nonce, + Balance: "1000000000000000000", + Pairs: map[string]string{ + "01": "02", + }, + } + + t.Run("should work", func(t *testing.T) { + _, err = node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "account was not found")) + + err = node.SetStateForAddress(addressBytes, addressState) + require.NoError(t, err) + + account, err := node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.NoError(t, err) + require.Equal(t, *addressState.Nonce, account.GetNonce()) + }) + t.Run("LoadAccount failure should error", func(t *testing.T) { + nodeLocal, errLocal := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress([]byte("address"), nil) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("state balance invalid should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Balance = "invalid balance" + err = node.SetStateForAddress(addressBytes, &addressStateCopy) + require.Error(t, err) + require.Equal(t, "cannot convert string balance to *big.Int", err.Error()) + }) + t.Run("AddToBalance failure should error", func(t *testing.T) { + nodeLocal, errLocal := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.UserAccountStub{ + AddToBalanceCalled: func(value *big.Int) error { + return expectedErr + }, + Balance: big.NewInt(0), + }, nil + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress([]byte("address"), addressState) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("SaveKeyValue failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, 
errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.UserAccountStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + return expectedErr + }, + Balance: big.NewInt(0), + }, nil + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress(addressBytes, addressState) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("invalid sc code should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.Code = "invalid code" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc code hash should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.CodeHash = "invalid code hash" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc code metadata should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.CodeMetadata = "invalid code metadata" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc owner should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.Owner = "invalid owner" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc dev rewards should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Owner = address + addressStateCopy.Address = scAddress + addressStateCopy.DeveloperRewards = "invalid dev rewards" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid root hash should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Owner = address + addressStateCopy.Address = scAddress // coverage + addressStateCopy.DeveloperRewards = "1000000" + addressStateCopy.RootHash = "invalid root hash" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("SaveAccount failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + SaveAccountCalled: func(account vmcommon.AccountHandler) error { + return expectedErr + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress(addressBytes, addressState) + require.Equal(t, expectedErr, errLocal) + }) +} + +func TestTestOnlyProcessingNode_IsInterfaceNil(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + var node *testOnlyProcessingNode + require.True(t, node.IsInterfaceNil()) + + node, _ = NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.False(t, node.IsInterfaceNil()) +} + +func TestTestOnlyProcessingNode_Close(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, err) + + require.NoError(t, node.Close()) +} + 
+func TestTestOnlyProcessingNode_Getters(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + node := &testOnlyProcessingNode{} + require.Nil(t, node.GetProcessComponents()) + require.Nil(t, node.GetChainHandler()) + require.Nil(t, node.GetBroadcastMessenger()) + require.Nil(t, node.GetCryptoComponents()) + require.Nil(t, node.GetCoreComponents()) + require.Nil(t, node.GetStateComponents()) + require.Nil(t, node.GetFacadeHandler()) + require.Nil(t, node.GetStatusCoreComponents()) + + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.Nil(t, err) + + require.NotNil(t, node.GetProcessComponents()) + require.NotNil(t, node.GetChainHandler()) + require.NotNil(t, node.GetBroadcastMessenger()) + require.NotNil(t, node.GetShardCoordinator()) + require.NotNil(t, node.GetCryptoComponents()) + require.NotNil(t, node.GetCoreComponents()) + require.NotNil(t, node.GetStateComponents()) + require.NotNil(t, node.GetFacadeHandler()) + require.NotNil(t, node.GetStatusCoreComponents()) +} diff --git a/node/chainSimulator/configs/configs.go b/node/chainSimulator/configs/configs.go new file mode 100644 index 00000000000..22fc863c7a0 --- /dev/null +++ b/node/chainSimulator/configs/configs.go @@ -0,0 +1,464 @@ +package configs + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "encoding/pem" + "math/big" + "os" + "path" + "strconv" + "strings" + + "github.com/multiversx/mx-chain-go/common/factory" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/genesis/data" + "github.com/multiversx/mx-chain-go/node" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-go/testscommon" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" + shardingCore "github.com/multiversx/mx-chain-core-go/core/sharding" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-crypto-go/signing" + "github.com/multiversx/mx-chain-crypto-go/signing/ed25519" + "github.com/multiversx/mx-chain-crypto-go/signing/mcl" +) + +var oneEgld = big.NewInt(1000000000000000000) +var initialStakedEgldPerNode = big.NewInt(0).Mul(oneEgld, big.NewInt(2500)) +var initialSupply = big.NewInt(0).Mul(oneEgld, big.NewInt(20000000)) // 20 million EGLD +const ( + // ChainID contains the chain id + ChainID = "chain" + + // ChainSimulatorConsensusGroupSize defines the size of the consensus group for chain simulator + ChainSimulatorConsensusGroupSize = 1 + allValidatorsPemFileName = "allValidatorsKeys.pem" +) + +// ArgsChainSimulatorConfigs holds all the components needed to create the chain simulator configs +type ArgsChainSimulatorConfigs struct { + NumOfShards uint32 + OriginalConfigsPath string + GenesisTimeStamp int64 + RoundDurationInMillis uint64 + TempDir string + MinNodesPerShard uint32 + ConsensusGroupSize uint32 + MetaChainMinNodes uint32 + MetaChainConsensusGroupSize uint32 + InitialEpoch uint32 + RoundsPerEpoch core.OptionalUint64 + NumNodesWaitingListShard uint32 + NumNodesWaitingListMeta uint32 + AlterConfigsFunction func(cfg *config.Configs) +} + +// 
ArgsConfigsSimulator holds the configs for the chain simulator +type ArgsConfigsSimulator struct { + GasScheduleFilename string + Configs config.Configs + ValidatorsPrivateKeys []crypto.PrivateKey + InitialWallets *dtos.InitialWalletKeys +} + +// CreateChainSimulatorConfigs will create the chain simulator configs +func CreateChainSimulatorConfigs(args ArgsChainSimulatorConfigs) (*ArgsConfigsSimulator, error) { + configs, err := testscommon.CreateTestConfigs(args.TempDir, args.OriginalConfigsPath) + if err != nil { + return nil, err + } + + configs.GeneralConfig.GeneralSettings.ChainID = ChainID + + // empty genesis smart contracts file + err = os.WriteFile(configs.ConfigurationPathsHolder.SmartContracts, []byte("[]"), os.ModePerm) + if err != nil { + return nil, err + } + + // update genesis.json + initialWallets, err := generateGenesisFile(args, configs) + if err != nil { + return nil, err + } + + // generate validators key and nodesSetup.json + privateKeys, publicKeys, err := generateValidatorsKeyAndUpdateFiles( + configs, + initialWallets.StakeWallets, + args, + ) + if err != nil { + return nil, err + } + + configs.ConfigurationPathsHolder.AllValidatorKeys = path.Join(args.TempDir, allValidatorsPemFileName) + err = generateValidatorsPem(configs.ConfigurationPathsHolder.AllValidatorKeys, publicKeys, privateKeys) + if err != nil { + return nil, err + } + + configs.GeneralConfig.SmartContractsStorage.DB.Type = string(storageunit.MemoryDB) + configs.GeneralConfig.SmartContractsStorageForSCQuery.DB.Type = string(storageunit.MemoryDB) + configs.GeneralConfig.SmartContractsStorageSimulate.DB.Type = string(storageunit.MemoryDB) + + eligibleNodes := args.MinNodesPerShard*args.NumOfShards + args.MetaChainMinNodes + waitingNodes := args.NumNodesWaitingListShard*args.NumOfShards + args.NumNodesWaitingListMeta + + SetMaxNumberOfNodesInConfigs(configs, eligibleNodes, waitingNodes, args.NumOfShards) + + // set compatible trie configs + configs.GeneralConfig.StateTriesConfig.SnapshotsEnabled = false + + // enable db lookup extension + configs.GeneralConfig.DbLookupExtensions.Enabled = true + + configs.GeneralConfig.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds = 1 + configs.GeneralConfig.EpochStartConfig.GenesisEpoch = args.InitialEpoch + configs.GeneralConfig.EpochStartConfig.MinRoundsBetweenEpochs = 1 + + if args.RoundsPerEpoch.HasValue { + configs.GeneralConfig.EpochStartConfig.RoundsPerEpoch = int64(args.RoundsPerEpoch.Value) + } + + gasScheduleName, err := GetLatestGasScheduleFilename(configs.ConfigurationPathsHolder.GasScheduleDirectoryName) + if err != nil { + return nil, err + } + + node.ApplyArchCustomConfigs(configs) + + if args.AlterConfigsFunction != nil { + args.AlterConfigsFunction(configs) + } + + return &ArgsConfigsSimulator{ + Configs: *configs, + ValidatorsPrivateKeys: privateKeys, + GasScheduleFilename: gasScheduleName, + InitialWallets: initialWallets, + }, nil +} + +// SetMaxNumberOfNodesInConfigs will correctly set the max number of nodes in configs +func SetMaxNumberOfNodesInConfigs(cfg *config.Configs, eligibleNodes uint32, waitingNodes uint32, numOfShards uint32) { + cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake = uint64(eligibleNodes + waitingNodes) + numMaxNumNodesEnableEpochs := len(cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch) + for idx := 0; idx < numMaxNumNodesEnableEpochs-1; idx++ { + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[idx].MaxNumNodes = eligibleNodes + waitingNodes + } + + 
cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-1].EpochEnable = cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch + prevEntry := cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-2] + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-1].NodesToShufflePerShard = prevEntry.NodesToShufflePerShard + + stakingV4NumNodes := eligibleNodes + waitingNodes + if stakingV4NumNodes-(numOfShards+1)*prevEntry.NodesToShufflePerShard >= eligibleNodes { + // prevent the case in which we are decreasing the eligible number of nodes because we are working with 0 waiting list size + stakingV4NumNodes -= (numOfShards + 1) * prevEntry.NodesToShufflePerShard + } + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-1].MaxNumNodes = stakingV4NumNodes +} + +// SetQuickJailRatingConfig will set the rating config in a way that leads to rapid jailing of a node +func SetQuickJailRatingConfig(cfg *config.Configs) { + cfg.RatingsConfig.ShardChain.RatingSteps.ConsecutiveMissedBlocksPenalty = 100 + cfg.RatingsConfig.ShardChain.RatingSteps.HoursToMaxRatingFromStartRating = 1 + cfg.RatingsConfig.MetaChain.RatingSteps.ConsecutiveMissedBlocksPenalty = 100 + cfg.RatingsConfig.MetaChain.RatingSteps.HoursToMaxRatingFromStartRating = 1 +} + +// SetStakingV4ActivationEpochs configures activation epochs for Staking V4. +// It takes an initial epoch and sets three consecutive steps for enabling Staking V4 features: +// - Step 1 activation epoch +// - Step 2 activation epoch +// - Step 3 activation epoch +func SetStakingV4ActivationEpochs(cfg *config.Configs, initialEpoch uint32) { + cfg.EpochConfig.EnableEpochs.StakeLimitsEnableEpoch = initialEpoch + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = initialEpoch + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = initialEpoch + 1 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = initialEpoch + 2 + + // Set the MaxNodesChange enable epoch for index 2 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = initialEpoch + 2 + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 +} + +func generateGenesisFile(args ArgsChainSimulatorConfigs, configs *config.Configs) (*dtos.InitialWalletKeys, error) { + addressConverter, err := factory.NewPubkeyConverter(configs.GeneralConfig.AddressPubkeyConverter) + if err != nil { + return nil, err + } + + initialWalletKeys := &dtos.InitialWalletKeys{ + BalanceWallets: make(map[uint32]*dtos.WalletKey), + StakeWallets: make([]*dtos.WalletKey, 0), + } + + addresses := make([]data.InitialAccount, 0) + numOfNodes := int((args.NumNodesWaitingListShard+args.MinNodesPerShard)*args.NumOfShards + args.NumNodesWaitingListMeta + args.MetaChainMinNodes) + for i := 0; i < numOfNodes; i++ { + wallet, errGenerate := generateWalletKey(addressConverter) + if errGenerate != nil { + return nil, errGenerate + } + + stakedValue := big.NewInt(0).Set(initialStakedEgldPerNode) + addresses = append(addresses, data.InitialAccount{ + Address: wallet.Address.Bech32, + StakingValue: stakedValue, + Supply: stakedValue, + }) + + initialWalletKeys.StakeWallets = append(initialWalletKeys.StakeWallets, wallet) + } + + // generate an address for every shard + initialBalance := big.NewInt(0).Set(initialSupply) + totalStakedValue := big.NewInt(int64(numOfNodes)) + totalStakedValue = totalStakedValue.Mul(totalStakedValue, big.NewInt(0).Set(initialStakedEgldPerNode)) + initialBalance = 
initialBalance.Sub(initialBalance, totalStakedValue) + + walletBalance := big.NewInt(0).Set(initialBalance) + walletBalance.Div(walletBalance, big.NewInt(int64(args.NumOfShards))) + + // remainder = balance % numTotalWalletKeys + remainder := big.NewInt(0).Set(initialBalance) + remainder.Mod(remainder, big.NewInt(int64(args.NumOfShards))) + + for shardID := uint32(0); shardID < args.NumOfShards; shardID++ { + walletKey, errG := generateWalletKeyForShard(shardID, args.NumOfShards, addressConverter) + if errG != nil { + return nil, errG + } + + addresses = append(addresses, data.InitialAccount{ + Address: walletKey.Address.Bech32, + Balance: big.NewInt(0).Set(walletBalance), + Supply: big.NewInt(0).Set(walletBalance), + }) + + initialWalletKeys.BalanceWallets[shardID] = walletKey + } + + addresses[len(addresses)-1].Balance.Add(walletBalance, remainder) + addresses[len(addresses)-1].Supply.Add(walletBalance, remainder) + + addressesBytes, errM := json.Marshal(addresses) + if errM != nil { + return nil, errM + } + + err = os.WriteFile(configs.ConfigurationPathsHolder.Genesis, addressesBytes, os.ModePerm) + if err != nil { + return nil, err + } + + return initialWalletKeys, nil +} + +func generateValidatorsKeyAndUpdateFiles( + configs *config.Configs, + stakeWallets []*dtos.WalletKey, + args ArgsChainSimulatorConfigs, +) ([]crypto.PrivateKey, []crypto.PublicKey, error) { + blockSigningGenerator := signing.NewKeyGenerator(mcl.NewSuiteBLS12()) + + nodesSetupFile := configs.ConfigurationPathsHolder.Nodes + nodes := &sharding.NodesSetup{} + err := core.LoadJsonFile(nodes, nodesSetupFile) + if err != nil { + return nil, nil, err + } + + nodes.RoundDuration = args.RoundDurationInMillis + nodes.StartTime = args.GenesisTimeStamp + + nodes.ConsensusGroupSize = args.ConsensusGroupSize + nodes.MetaChainConsensusGroupSize = args.MetaChainConsensusGroupSize + nodes.Hysteresis = 0 + + nodes.MinNodesPerShard = args.MinNodesPerShard + nodes.MetaChainMinNodes = args.MetaChainMinNodes + + nodes.InitialNodes = make([]*sharding.InitialNode, 0) + privateKeys := make([]crypto.PrivateKey, 0) + publicKeys := make([]crypto.PublicKey, 0) + walletIndex := 0 + // generate meta keys + for idx := uint32(0); idx < args.NumNodesWaitingListMeta+args.MetaChainMinNodes; idx++ { + sk, pk := blockSigningGenerator.GeneratePair() + privateKeys = append(privateKeys, sk) + publicKeys = append(publicKeys, pk) + + pkBytes, errB := pk.ToByteArray() + if errB != nil { + return nil, nil, errB + } + + nodes.InitialNodes = append(nodes.InitialNodes, &sharding.InitialNode{ + PubKey: hex.EncodeToString(pkBytes), + Address: stakeWallets[walletIndex].Address.Bech32, + }) + + walletIndex++ + } + + // generate shard keys + for idx1 := uint32(0); idx1 < args.NumOfShards; idx1++ { + for idx2 := uint32(0); idx2 < args.NumNodesWaitingListShard+args.MinNodesPerShard; idx2++ { + sk, pk := blockSigningGenerator.GeneratePair() + privateKeys = append(privateKeys, sk) + publicKeys = append(publicKeys, pk) + + pkBytes, errB := pk.ToByteArray() + if errB != nil { + return nil, nil, errB + } + + nodes.InitialNodes = append(nodes.InitialNodes, &sharding.InitialNode{ + PubKey: hex.EncodeToString(pkBytes), + Address: stakeWallets[walletIndex].Address.Bech32, + }) + walletIndex++ + } + } + + marshaledNodes, err := json.Marshal(nodes) + if err != nil { + return nil, nil, err + } + + err = os.WriteFile(nodesSetupFile, marshaledNodes, os.ModePerm) + if err != nil { + return nil, nil, err + } + + return privateKeys, publicKeys, nil +} + +func 
generateValidatorsPem(validatorsFile string, publicKeys []crypto.PublicKey, privateKey []crypto.PrivateKey) error { + validatorPubKeyConverter, err := pubkeyConverter.NewHexPubkeyConverter(96) + if err != nil { + return err + } + + buff := bytes.Buffer{} + for idx := 0; idx < len(publicKeys); idx++ { + publicKeyBytes, errA := publicKeys[idx].ToByteArray() + if errA != nil { + return errA + } + + pkString, errE := validatorPubKeyConverter.Encode(publicKeyBytes) + if errE != nil { + return errE + } + + privateKeyBytes, errP := privateKey[idx].ToByteArray() + if errP != nil { + return errP + } + + blk := pem.Block{ + Type: "PRIVATE KEY for " + pkString, + Bytes: []byte(hex.EncodeToString(privateKeyBytes)), + } + + err = pem.Encode(&buff, &blk) + if err != nil { + return err + } + } + + return os.WriteFile(validatorsFile, buff.Bytes(), 0644) +} + +// GetLatestGasScheduleFilename will parse the provided path and get the latest gas schedule filename +func GetLatestGasScheduleFilename(directory string) (string, error) { + entries, err := os.ReadDir(directory) + if err != nil { + return "", err + } + + extension := ".toml" + versionMarker := "V" + + highestVersion := 0 + filename := "" + for _, entry := range entries { + if entry.IsDir() { + continue + } + + name := entry.Name() + splt := strings.Split(name, versionMarker) + if len(splt) != 2 { + continue + } + + versionAsString := splt[1][:len(splt[1])-len(extension)] + number, errConversion := strconv.Atoi(versionAsString) + if errConversion != nil { + continue + } + + if number > highestVersion { + highestVersion = number + filename = name + } + } + + return path.Join(directory, filename), nil +} + +func generateWalletKeyForShard(shardID, numOfShards uint32, converter core.PubkeyConverter) (*dtos.WalletKey, error) { + for { + walletKey, err := generateWalletKey(converter) + if err != nil { + return nil, err + } + + addressShardID := shardingCore.ComputeShardID(walletKey.Address.Bytes, numOfShards) + if addressShardID != shardID { + continue + } + + return walletKey, nil + } +} + +func generateWalletKey(converter core.PubkeyConverter) (*dtos.WalletKey, error) { + walletSuite := ed25519.NewEd25519() + walletKeyGenerator := signing.NewKeyGenerator(walletSuite) + + sk, pk := walletKeyGenerator.GeneratePair() + pubKeyBytes, err := pk.ToByteArray() + if err != nil { + return nil, err + } + + privateKeyBytes, err := sk.ToByteArray() + if err != nil { + return nil, err + } + + bech32Address, err := converter.Encode(pubKeyBytes) + if err != nil { + return nil, err + } + + return &dtos.WalletKey{ + Address: dtos.WalletAddress{ + Bech32: bech32Address, + Bytes: pubKeyBytes, + }, + PrivateKeyHex: hex.EncodeToString(privateKeyBytes), + }, nil +} diff --git a/node/chainSimulator/configs/configs_test.go b/node/chainSimulator/configs/configs_test.go new file mode 100644 index 00000000000..03e464c5f36 --- /dev/null +++ b/node/chainSimulator/configs/configs_test.go @@ -0,0 +1,31 @@ +package configs + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/integrationTests/realcomponents" + + "github.com/stretchr/testify/require" +) + +func TestNewProcessorRunnerChainArguments(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + outputConfig, err := CreateChainSimulatorConfigs(ArgsChainSimulatorConfigs{ + NumOfShards: 3, + OriginalConfigsPath: "../../../cmd/node/config", + RoundDurationInMillis: 6000, + GenesisTimeStamp: 0, + TempDir: t.TempDir(), + MetaChainMinNodes: 1, + MinNodesPerShard: 1, 
+ ConsensusGroupSize: 1, + MetaChainConsensusGroupSize: 1, + }) + require.Nil(t, err) + + pr := realcomponents.NewProcessorRunner(t, outputConfig.Configs) + pr.Close(t) +} diff --git a/node/chainSimulator/disabled/antiflooder.go b/node/chainSimulator/disabled/antiflooder.go new file mode 100644 index 00000000000..0d4c45fd0e3 --- /dev/null +++ b/node/chainSimulator/disabled/antiflooder.go @@ -0,0 +1,72 @@ +package disabled + +import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/process" +) + +type antiFlooder struct { +} + +// NewAntiFlooder creates a new instance of disabled antiflooder +func NewAntiFlooder() *antiFlooder { + return &antiFlooder{} +} + +// CanProcessMessage returns nil +func (a *antiFlooder) CanProcessMessage(_ p2p.MessageP2P, _ core.PeerID) error { + return nil +} + +// IsOriginatorEligibleForTopic does nothing and returns nil +func (a *antiFlooder) IsOriginatorEligibleForTopic(_ core.PeerID, _ string) error { + return nil +} + +// CanProcessMessagesOnTopic does nothing and returns nil +func (a *antiFlooder) CanProcessMessagesOnTopic(_ core.PeerID, _ string, _ uint32, _ uint64, _ []byte) error { + return nil +} + +// ApplyConsensusSize does nothing +func (a *antiFlooder) ApplyConsensusSize(_ int) { +} + +// SetDebugger does nothing and returns nil +func (a *antiFlooder) SetDebugger(_ process.AntifloodDebugger) error { + return nil +} + +// BlacklistPeer does nothing +func (a *antiFlooder) BlacklistPeer(_ core.PeerID, _ string, _ time.Duration) { +} + +// ResetForTopic does nothing +func (a *antiFlooder) ResetForTopic(_ string) { +} + +// SetMaxMessagesForTopic does nothing +func (a *antiFlooder) SetMaxMessagesForTopic(_ string, _ uint32) { +} + +// SetPeerValidatorMapper does nothing and returns nil +func (a *antiFlooder) SetPeerValidatorMapper(_ process.PeerValidatorMapper) error { + return nil +} + +// SetTopicsForAll does nothing +func (a *antiFlooder) SetTopicsForAll(_ ...string) { +} + +// Close does nothing and returns nil +func (a *antiFlooder) Close() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (a *antiFlooder) IsInterfaceNil() bool { + return a == nil +} diff --git a/node/chainSimulator/disabled/peerHonesty.go b/node/chainSimulator/disabled/peerHonesty.go new file mode 100644 index 00000000000..87552b29e43 --- /dev/null +++ b/node/chainSimulator/disabled/peerHonesty.go @@ -0,0 +1,23 @@ +package disabled + +type peerHonesty struct { +} + +// NewPeerHonesty creates a new instance of disabled peer honesty +func NewPeerHonesty() *peerHonesty { + return &peerHonesty{} +} + +// ChangeScore does nothing +func (p *peerHonesty) ChangeScore(_ string, _ string, _ int) { +} + +// Close does nothing and returns nil +func (p *peerHonesty) Close() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (p *peerHonesty) IsInterfaceNil() bool { + return p == nil +} diff --git a/node/chainSimulator/disabled/peersRatingMonitor.go b/node/chainSimulator/disabled/peersRatingMonitor.go new file mode 100644 index 00000000000..425b63fdc8c --- /dev/null +++ b/node/chainSimulator/disabled/peersRatingMonitor.go @@ -0,0 +1,21 @@ +package disabled + +import "github.com/multiversx/mx-chain-go/p2p" + +type peersRatingMonitor struct { +} + +// NewPeersRatingMonitor will create a new disabled peersRatingMonitor instance +func 
NewPeersRatingMonitor() *peersRatingMonitor { + return &peersRatingMonitor{} +} + +// GetConnectedPeersRatings returns an empty string since it is a disabled component +func (monitor *peersRatingMonitor) GetConnectedPeersRatings(_ p2p.ConnectionsHandler) (string, error) { + return "", nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (monitor *peersRatingMonitor) IsInterfaceNil() bool { + return monitor == nil +} diff --git a/node/chainSimulator/dtos/keys.go b/node/chainSimulator/dtos/keys.go new file mode 100644 index 00000000000..7f4c0e613e9 --- /dev/null +++ b/node/chainSimulator/dtos/keys.go @@ -0,0 +1,25 @@ +package dtos + +// WalletKey holds the public and the private key of a wallet +type WalletKey struct { + Address WalletAddress `json:"address"` + PrivateKeyHex string `json:"privateKeyHex"` +} + +// InitialWalletKeys holds the initial wallet keys +type InitialWalletKeys struct { + StakeWallets []*WalletKey `json:"stakeWallets"` + BalanceWallets map[uint32]*WalletKey `json:"balanceWallets"` +} + +// WalletAddress holds the address in multiple formats +type WalletAddress struct { + Bech32 string `json:"bech32"` + Bytes []byte `json:"bytes"` +} + +// BLSKey holds the BLS key in multiple formats +type BLSKey struct { + Hex string + Bytes []byte +} diff --git a/node/chainSimulator/dtos/state.go b/node/chainSimulator/dtos/state.go new file mode 100644 index 00000000000..cfcf12070bc --- /dev/null +++ b/node/chainSimulator/dtos/state.go @@ -0,0 +1,15 @@ +package dtos + +// AddressState will hold the address state +type AddressState struct { + Address string `json:"address"` + Nonce *uint64 `json:"nonce,omitempty"` + Balance string `json:"balance,omitempty"` + Code string `json:"code,omitempty"` + RootHash string `json:"rootHash,omitempty"` + CodeMetadata string `json:"codeMetadata,omitempty"` + CodeHash string `json:"codeHash,omitempty"` + DeveloperRewards string `json:"developerReward,omitempty"` + Owner string `json:"ownerAddress,omitempty"` + Pairs map[string]string `json:"pairs,omitempty"` +} diff --git a/node/chainSimulator/errors.go b/node/chainSimulator/errors.go new file mode 100644 index 00000000000..57f0db0c457 --- /dev/null +++ b/node/chainSimulator/errors.go @@ -0,0 +1,9 @@ +package chainSimulator + +import "errors" + +var ( + errNilChainSimulator = errors.New("nil chain simulator") + errNilMetachainNode = errors.New("nil metachain node") + errShardSetupError = errors.New("shard setup error") +) diff --git a/node/chainSimulator/errors/errors.go b/node/chainSimulator/errors/errors.go new file mode 100644 index 00000000000..c1be2d016b1 --- /dev/null +++ b/node/chainSimulator/errors/errors.go @@ -0,0 +1,12 @@ +package errors + +import "errors" + +// ErrEmptySliceOfTxs signals that an empty slice of transactions has been provided +var ErrEmptySliceOfTxs = errors.New("empty slice of transactions to send") + +// ErrNilTransaction signals that a nil transaction has been provided +var ErrNilTransaction = errors.New("nil transaction") + +// ErrInvalidMaxNumOfBlocks signals that an invalid max numerof blocks has been provided +var ErrInvalidMaxNumOfBlocks = errors.New("invalid max number of blocks to generate") diff --git a/node/chainSimulator/facade.go b/node/chainSimulator/facade.go new file mode 100644 index 00000000000..8cf4d1f50b6 --- /dev/null +++ b/node/chainSimulator/facade.go @@ -0,0 +1,54 @@ +package chainSimulator + +import ( + "fmt" + + "github.com/multiversx/mx-chain-core-go/core/check" + 
"github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +type chainSimulatorFacade struct { + chainSimulator ChainSimulator + metaNode process.NodeHandler +} + +// NewChainSimulatorFacade returns the chain simulator facade +func NewChainSimulatorFacade(chainSimulator ChainSimulator) (*chainSimulatorFacade, error) { + if check.IfNil(chainSimulator) { + return nil, errNilChainSimulator + } + + metaNode := chainSimulator.GetNodeHandler(common.MetachainShardId) + if check.IfNil(metaNode) { + return nil, errNilMetachainNode + } + + return &chainSimulatorFacade{ + chainSimulator: chainSimulator, + metaNode: metaNode, + }, nil +} + +// GetExistingAccountFromBech32AddressString will return the existing account for the provided address in bech32 format +func (f *chainSimulatorFacade) GetExistingAccountFromBech32AddressString(address string) (vmcommon.UserAccountHandler, error) { + addressBytes, err := f.metaNode.GetCoreComponents().AddressPubKeyConverter().Decode(address) + if err != nil { + return nil, err + } + + shardID := f.metaNode.GetShardCoordinator().ComputeId(addressBytes) + + shardNodeHandler := f.chainSimulator.GetNodeHandler(shardID) + if check.IfNil(shardNodeHandler) { + return nil, fmt.Errorf("%w missing node handler for shard %d", errShardSetupError, shardID) + } + + account, err := shardNodeHandler.GetStateComponents().AccountsAdapter().GetExistingAccount(addressBytes) + if err != nil { + return nil, err + } + + return account.(vmcommon.UserAccountHandler), nil +} diff --git a/node/chainSimulator/facade_test.go b/node/chainSimulator/facade_test.go new file mode 100644 index 00000000000..908704c05a0 --- /dev/null +++ b/node/chainSimulator/facade_test.go @@ -0,0 +1,193 @@ +package chainSimulator + +import ( + "errors" + "testing" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/factory" + factoryMock "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/chainSimulator" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/vmcommonMocks" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") + +func TestNewChainSimulatorFacade(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{} + }, + }) + require.NoError(t, err) + require.NotNil(t, facade) + }) + t.Run("nil chain simulator should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(nil) + require.Equal(t, errNilChainSimulator, err) + require.Nil(t, facade) + }) + t.Run("nil node handler returned by chain 
simulator should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return nil + }, + }) + require.Equal(t, errNilMetachainNode, err) + require.Nil(t, facade) + }) +} + +func TestChainSimulatorFacade_GetExistingAccountFromBech32AddressString(t *testing.T) { + t.Parallel() + + t.Run("address decode failure should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{ + DecodeCalled: func(humanReadable string) ([]byte, error) { + return nil, expectedErr + }, + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.Equal(t, expectedErr, err) + require.Nil(t, handler) + }) + t.Run("nil shard node should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + if shardID != common.MetachainShardId { + return nil + } + + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{}, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + return 0 + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.True(t, errors.Is(err, errShardSetupError)) + require.Nil(t, handler) + }) + t.Run("shard node GetExistingAccount should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{}, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + return 0 + }, + } + }, + GetStateComponentsCalled: func() factory.StateComponentsHolder { + return &factoryMock.StateComponentsHolderStub{ + AccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMock.AccountsStub{ + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + } + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.Equal(t, expectedErr, err) + require.Nil(t, handler) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedAccount := &vmcommonMocks.UserAccountStub{} + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() 
factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{}, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + return 0 + }, + } + }, + GetStateComponentsCalled: func() factory.StateComponentsHolder { + return &factoryMock.StateComponentsHolderStub{ + AccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMock.AccountsStub{ + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return providedAccount, nil + }, + } + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.NoError(t, err) + require.True(t, handler == providedAccount) // pointer testing + }) +} diff --git a/node/chainSimulator/interface.go b/node/chainSimulator/interface.go new file mode 100644 index 00000000000..0b2f51ca457 --- /dev/null +++ b/node/chainSimulator/interface.go @@ -0,0 +1,17 @@ +package chainSimulator + +import "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + +// ChainHandler defines what a chain handler should be able to do +type ChainHandler interface { + IncrementRound() + CreateNewBlock() error + IsInterfaceNil() bool +} + +// ChainSimulator defines what a chain simulator should be able to do +type ChainSimulator interface { + GenerateBlocks(numOfBlocks int) error + GetNodeHandler(shardID uint32) process.NodeHandler + IsInterfaceNil() bool +} diff --git a/node/chainSimulator/process/errors.go b/node/chainSimulator/process/errors.go new file mode 100644 index 00000000000..eb1a69656e7 --- /dev/null +++ b/node/chainSimulator/process/errors.go @@ -0,0 +1,6 @@ +package process + +import "errors" + +// ErrNilNodeHandler signals that a nil node handler has been provided +var ErrNilNodeHandler = errors.New("nil node handler") diff --git a/node/chainSimulator/process/interface.go b/node/chainSimulator/process/interface.go new file mode 100644 index 00000000000..47f937fb97c --- /dev/null +++ b/node/chainSimulator/process/interface.go @@ -0,0 +1,30 @@ +package process + +import ( + chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/sharding" +) + +// NodeHandler defines what a node handler should be able to do +type NodeHandler interface { + GetProcessComponents() factory.ProcessComponentsHolder + GetChainHandler() chainData.ChainHandler + GetBroadcastMessenger() consensus.BroadcastMessenger + GetShardCoordinator() sharding.Coordinator + GetCryptoComponents() factory.CryptoComponentsHolder + GetCoreComponents() factory.CoreComponentsHolder + GetDataComponents() factory.DataComponentsHolder + GetStateComponents() factory.StateComponentsHolder + GetFacadeHandler() shared.FacadeHandler + GetStatusCoreComponents() factory.StatusCoreComponentsHolder + SetKeyValueForAddress(addressBytes []byte, state map[string]string) error + SetStateForAddress(address []byte, state *dtos.AddressState) error + RemoveAccount(address []byte) error + ForceChangeOfEpoch() error + Close() error + IsInterfaceNil() bool +} diff --git 
a/node/chainSimulator/process/processor.go b/node/chainSimulator/process/processor.go new file mode 100644 index 00000000000..d8f225bfde8 --- /dev/null +++ b/node/chainSimulator/process/processor.go @@ -0,0 +1,233 @@ +package process + +import ( + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("process-block") + +type manualRoundHandler interface { + IncrementIndex() +} + +type blocksCreator struct { + nodeHandler NodeHandler +} + +// NewBlocksCreator will create a new instance of blocksCreator +func NewBlocksCreator(nodeHandler NodeHandler) (*blocksCreator, error) { + if check.IfNil(nodeHandler) { + return nil, ErrNilNodeHandler + } + + return &blocksCreator{ + nodeHandler: nodeHandler, + }, nil +} + +// IncrementRound will increment the current round +func (creator *blocksCreator) IncrementRound() { + roundHandler := creator.nodeHandler.GetCoreComponents().RoundHandler() + manual := roundHandler.(manualRoundHandler) + manual.IncrementIndex() + + creator.nodeHandler.GetStatusCoreComponents().AppStatusHandler().SetUInt64Value(common.MetricCurrentRound, uint64(roundHandler.Index())) +} + +// CreateNewBlock creates and process a new block +func (creator *blocksCreator) CreateNewBlock() error { + bp := creator.nodeHandler.GetProcessComponents().BlockProcessor() + + nonce, _, prevHash, prevRandSeed, epoch := creator.getPreviousHeaderData() + round := creator.nodeHandler.GetCoreComponents().RoundHandler().Index() + newHeader, err := bp.CreateNewHeader(uint64(round), nonce+1) + if err != nil { + return err + } + + shardID := creator.nodeHandler.GetShardCoordinator().SelfId() + err = newHeader.SetShardID(shardID) + if err != nil { + return err + } + + err = newHeader.SetPrevHash(prevHash) + if err != nil { + return err + } + + err = newHeader.SetPrevRandSeed(prevRandSeed) + if err != nil { + return err + } + + err = newHeader.SetPubKeysBitmap([]byte{1}) + if err != nil { + return err + } + + err = newHeader.SetChainID([]byte(configs.ChainID)) + if err != nil { + return err + } + + headerCreationTime := creator.nodeHandler.GetCoreComponents().RoundHandler().TimeStamp() + err = newHeader.SetTimeStamp(uint64(headerCreationTime.Unix())) + if err != nil { + return err + } + + validatorsGroup, err := creator.nodeHandler.GetProcessComponents().NodesCoordinator().ComputeConsensusGroup(prevRandSeed, newHeader.GetRound(), shardID, epoch) + if err != nil { + return err + } + blsKey := validatorsGroup[spos.IndexOfLeaderInConsensusGroup] + + isManaged := creator.nodeHandler.GetCryptoComponents().KeysHandler().IsKeyManagedByCurrentNode(blsKey.PubKey()) + if !isManaged { + log.Debug("cannot propose block - leader bls key is missing", + "leader key", blsKey.PubKey(), + "shard", creator.nodeHandler.GetShardCoordinator().SelfId()) + return nil + } + + signingHandler := creator.nodeHandler.GetCryptoComponents().ConsensusSigningHandler() + randSeed, err := signingHandler.CreateSignatureForPublicKey(newHeader.GetPrevRandSeed(), blsKey.PubKey()) + if err != nil { + return err + } + err = newHeader.SetRandSeed(randSeed) + if err != nil { + return err + } + + header, block, err := bp.CreateBlock(newHeader, func() bool { + 
return true + }) + if err != nil { + return err + } + + err = creator.setHeaderSignatures(header, blsKey.PubKey()) + if err != nil { + return err + } + + err = bp.CommitBlock(header, block) + if err != nil { + return err + } + + miniBlocks, transactions, err := bp.MarshalizedDataToBroadcast(header, block) + if err != nil { + return err + } + + err = creator.nodeHandler.GetBroadcastMessenger().BroadcastHeader(header, blsKey.PubKey()) + if err != nil { + return err + } + + err = creator.nodeHandler.GetBroadcastMessenger().BroadcastMiniBlocks(miniBlocks, blsKey.PubKey()) + if err != nil { + return err + } + + return creator.nodeHandler.GetBroadcastMessenger().BroadcastTransactions(transactions, blsKey.PubKey()) +} + +func (creator *blocksCreator) getPreviousHeaderData() (nonce, round uint64, prevHash, prevRandSeed []byte, epoch uint32) { + currentHeader := creator.nodeHandler.GetChainHandler().GetCurrentBlockHeader() + + if currentHeader != nil { + nonce, round = currentHeader.GetNonce(), currentHeader.GetRound() + prevHash = creator.nodeHandler.GetChainHandler().GetCurrentBlockHeaderHash() + prevRandSeed = currentHeader.GetRandSeed() + epoch = currentHeader.GetEpoch() + return + } + + prevHash = creator.nodeHandler.GetChainHandler().GetGenesisHeaderHash() + prevRandSeed = creator.nodeHandler.GetChainHandler().GetGenesisHeader().GetRandSeed() + round = uint64(creator.nodeHandler.GetCoreComponents().RoundHandler().Index()) - 1 + epoch = creator.nodeHandler.GetChainHandler().GetGenesisHeader().GetEpoch() + nonce = creator.nodeHandler.GetChainHandler().GetGenesisHeader().GetNonce() + + return +} + +func (creator *blocksCreator) setHeaderSignatures(header data.HeaderHandler, blsKeyBytes []byte) error { + signingHandler := creator.nodeHandler.GetCryptoComponents().ConsensusSigningHandler() + headerClone := header.ShallowClone() + _ = headerClone.SetPubKeysBitmap(nil) + + marshalizedHdr, err := creator.nodeHandler.GetCoreComponents().InternalMarshalizer().Marshal(headerClone) + if err != nil { + return err + } + + err = signingHandler.Reset([]string{string(blsKeyBytes)}) + if err != nil { + return err + } + + headerHash := creator.nodeHandler.GetCoreComponents().Hasher().Compute(string(marshalizedHdr)) + _, err = signingHandler.CreateSignatureShareForPublicKey( + headerHash, + uint16(0), + header.GetEpoch(), + blsKeyBytes, + ) + if err != nil { + return err + } + + sig, err := signingHandler.AggregateSigs(header.GetPubKeysBitmap(), header.GetEpoch()) + if err != nil { + return err + } + + err = header.SetSignature(sig) + if err != nil { + return err + } + + leaderSignature, err := creator.createLeaderSignature(header, blsKeyBytes) + if err != nil { + return err + } + + err = header.SetLeaderSignature(leaderSignature) + if err != nil { + return err + } + + return nil +} + +func (creator *blocksCreator) createLeaderSignature(header data.HeaderHandler, blsKeyBytes []byte) ([]byte, error) { + headerClone := header.ShallowClone() + err := headerClone.SetLeaderSignature(nil) + if err != nil { + return nil, err + } + + marshalizedHdr, err := creator.nodeHandler.GetCoreComponents().InternalMarshalizer().Marshal(headerClone) + if err != nil { + return nil, err + } + + signingHandler := creator.nodeHandler.GetCryptoComponents().ConsensusSigningHandler() + + return signingHandler.CreateSignatureForPublicKey(marshalizedHdr, blsKeyBytes) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (creator *blocksCreator) IsInterfaceNil() bool { + return creator == nil +} diff --git 
a/node/chainSimulator/process/processor_test.go b/node/chainSimulator/process/processor_test.go new file mode 100644 index 00000000000..80ffd568134 --- /dev/null +++ b/node/chainSimulator/process/processor_test.go @@ -0,0 +1,631 @@ +package process_test + +import ( + "errors" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + mockConsensus "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/chainSimulator" + testsConsensus "github.com/multiversx/mx-chain-go/testscommon/consensus" + testsFactory "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") + +func TestNewBlocksCreator(t *testing.T) { + t.Parallel() + + t.Run("nil node handler should error", func(t *testing.T) { + t.Parallel() + + creator, err := chainSimulatorProcess.NewBlocksCreator(nil) + require.Equal(t, chainSimulatorProcess.ErrNilNodeHandler, err) + require.Nil(t, creator) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + creator, err := chainSimulatorProcess.NewBlocksCreator(&chainSimulator.NodeHandlerMock{}) + require.NoError(t, err) + require.NotNil(t, creator) + }) +} + +func TestBlocksCreator_IsInterfaceNil(t *testing.T) { + t.Parallel() + + creator, _ := chainSimulatorProcess.NewBlocksCreator(nil) + require.True(t, creator.IsInterfaceNil()) + + creator, _ = chainSimulatorProcess.NewBlocksCreator(&chainSimulator.NodeHandlerMock{}) + require.False(t, creator.IsInterfaceNil()) +} + +func TestBlocksCreator_IncrementRound(t *testing.T) { + t.Parallel() + + wasIncrementIndexCalled := false + wasSetUInt64ValueCalled := false + nodeHandler := &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &testsFactory.CoreComponentsHolderStub{ + RoundHandlerCalled: func() consensus.RoundHandler { + return &testscommon.RoundHandlerMock{ + IncrementIndexCalled: func() { + wasIncrementIndexCalled = true + }, + } + }, + } + }, + GetStatusCoreComponentsCalled: func() factory.StatusCoreComponentsHolder { + return &testsFactory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + wasSetUInt64ValueCalled = true + require.Equal(t, common.MetricCurrentRound, key) + }, + }, + } + }, + } + creator, err := 
chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + creator.IncrementRound() + require.True(t, wasIncrementIndexCalled) + require.True(t, wasSetUInt64ValueCalled) +} + +func TestBlocksCreator_CreateNewBlock(t *testing.T) { + t.Parallel() + + t.Run("CreateNewHeader failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return nil, expectedErr + }, + } + nodeHandler := getNodeHandler() + nodeHandler.GetProcessComponentsCalled = func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: blockProcess, + } + } + nodeHandler.GetChainHandlerCalled = func() data.ChainHandler { + return &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.HeaderV2{} // coverage for getPreviousHeaderData + }, + } + } + + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("SetShardID failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetShardIDCalled: func(shardId uint32) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetPrevHash failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetPrevHashCalled: func(hash []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetPrevRandSeed failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetPrevRandSeedCalled: func(seed []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetPubKeysBitmap failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetPubKeysBitmapCalled: func(bitmap []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetChainID failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetChainIDCalled: func(chainID []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetTimeStamp failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetTimeStampCalled: func(timestamp uint64) error { + return expectedErr + }, + }, 
nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("ComputeConsensusGroup failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + nodeHandler.GetProcessComponentsCalled = func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + }, + NodesCoord: &shardingMocks.NodesCoordinatorStub{ + ComputeConsensusGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("key not managed by the current node should return nil", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return false + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.NoError(t, err) + }) + t.Run("CreateSignatureForPublicKey failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + CreateSignatureForPublicKeyCalled: func(message []byte, publicKeyBytes []byte) ([]byte, error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("SetRandSeed failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetRandSeedCalled: func(seed []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("CreateBlock failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return nil, nil, expectedErr + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("setHeaderSignatures.Marshal failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + rh := nodeHandler.GetCoreComponents().RoundHandler() + nodeHandler.GetCoreComponentsCalled = func() factory.CoreComponentsHolder { + return &testsFactory.CoreComponentsHolderStub{ + RoundHandlerCalled: func() consensus.RoundHandler { + return rh + }, + InternalMarshalizerCalled: func() marshal.Marshalizer { + 
return &testscommon.MarshallerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.Reset failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + ResetCalled: func(pubKeys []string) error { + return expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.CreateSignatureShareForPublicKey failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(message []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.AggregateSigs failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + AggregateSigsCalled: func(bitmap []byte, epoch uint32) ([]byte, error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.SetSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + SetSignatureCalled: func(signature []byte) error { + return expectedErr + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("createLeaderSignature.SetLeaderSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return 
&testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{ + SetLeaderSignatureCalled: func(signature []byte) error { + return expectedErr + }, + } + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("createLeaderSignature.SetLeaderSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{ + SetLeaderSignatureCalled: func(signature []byte) error { + return expectedErr + }, + } + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("setHeaderSignatures.SetLeaderSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + SetLeaderSignatureCalled: func(signature []byte) error { + return expectedErr + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("CommitBlock failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, &block.Body{}, nil + }, + CommitBlockCalled: func(header data.HeaderHandler, body data.BodyHandler) error { + return expectedErr + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("MarshalizedDataToBroadcast failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, &block.Body{}, nil + }, + MarshalizedDataToBroadcastCalled: func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + return nil, nil, expectedErr + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("BroadcastHeader failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + nodeHandler.GetBroadcastMessengerCalled = func() consensus.BroadcastMessenger { + return 
&mockConsensus.BroadcastMessengerMock{ + BroadcastHeaderCalled: func(handler data.HeaderHandler, bytes []byte) error { + return expectedErr + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + creator, err := chainSimulatorProcess.NewBlocksCreator(getNodeHandler()) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.NoError(t, err) + }) +} + +func testCreateNewBlock(t *testing.T, blockProcess process.BlockProcessor, expectedErr error) { + nodeHandler := getNodeHandler() + nc := nodeHandler.GetProcessComponents().NodesCoordinator() + nodeHandler.GetProcessComponentsCalled = func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: blockProcess, + NodesCoord: nc, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) +} + +func getNodeHandler() *chainSimulator.NodeHandlerMock { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &testsFactory.CoreComponentsHolderStub{ + RoundHandlerCalled: func() consensus.RoundHandler { + return &testscommon.RoundHandlerMock{ + TimeStampCalled: func() time.Time { + return time.Now() + }, + } + }, + InternalMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{} + }, + HasherCalled: func() hashing.Hasher { + return &testscommon.HasherStub{ + ComputeCalled: func(s string) []byte { + return []byte("hash") + }, + } + }, + } + }, + GetProcessComponentsCalled: func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + haveTime() // coverage only + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, &block.Body{}, nil + }, + MarshalizedDataToBroadcastCalled: func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + return make(map[uint32][]byte), make(map[string][][]byte), nil + }, + }, + NodesCoord: &shardingMocks.NodesCoordinatorStub{ + ComputeConsensusGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return []nodesCoordinator.Validator{ + shardingMocks.NewValidatorMock([]byte("A"), 1, 1), + }, nil + }, + }, + } + }, + GetChainHandlerCalled: func() data.ChainHandler { + return &testscommon.ChainHandlerStub{ + GetGenesisHeaderCalled: func() data.HeaderHandler { + return &block.HeaderV2{} + }, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{} + }, + GetCryptoComponentsCalled: func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return true + }, + }, + SigHandler: &testsConsensus.SigningHandlerStub{}, + } + }, + GetBroadcastMessengerCalled: func() 
consensus.BroadcastMessenger { + return &mockConsensus.BroadcastMessengerMock{} + }, + } +} diff --git a/node/customConfigsArm64.go b/node/customConfigsArm64.go new file mode 100644 index 00000000000..ce62a5fa604 --- /dev/null +++ b/node/customConfigsArm64.go @@ -0,0 +1,29 @@ +//go:build arm64 + +package node + +import ( + "runtime" + + "github.com/multiversx/mx-chain-go/config" +) + +// ApplyArchCustomConfigs will apply configuration tweaks based on the architecture the node is running on +func ApplyArchCustomConfigs(configs *config.Configs) { + log.Debug("ApplyArchCustomConfigs", "architecture", runtime.GOARCH) + + firstSupportedWasmer2VMVersion := "v1.5" + log.Debug("ApplyArchCustomConfigs - hardcoding the initial VM to " + firstSupportedWasmer2VMVersion) + configs.GeneralConfig.VirtualMachine.Execution.WasmVMVersions = []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: firstSupportedWasmer2VMVersion, + }, + } + configs.GeneralConfig.VirtualMachine.Querying.WasmVMVersions = []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: firstSupportedWasmer2VMVersion, + }, + } +} diff --git a/node/customConfigsArm64_test.go b/node/customConfigsArm64_test.go new file mode 100644 index 00000000000..5af9a729518 --- /dev/null +++ b/node/customConfigsArm64_test.go @@ -0,0 +1,92 @@ +//go:build arm64 + +package node + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/assert" +) + +func TestApplyArchCustomConfigs(t *testing.T) { + t.Parallel() + + executionVMConfig := config.VirtualMachineConfig{ + WasmVMVersions: []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.2", + }, + { + StartEpoch: 1, + Version: "v1.3", + }, + { + StartEpoch: 2, + Version: "v1.4", + }, + { + StartEpoch: 3, + Version: "v1.5", + }, + }, + TransferAndExecuteByUserAddresses: []string{"3132333435363738393031323334353637383930313233343536373839303234"}, + TimeOutForSCExecutionInMilliseconds: 1, + WasmerSIGSEGVPassthrough: true, + } + + queryVMConfig := config.QueryVirtualMachineConfig{ + VirtualMachineConfig: executionVMConfig, + NumConcurrentVMs: 15, + } + + expectedVMWasmVersionsConfig := []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.5", + }, + } + + t.Run("providing a configuration should alter it", func(t *testing.T) { + t.Parallel() + + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{ + VirtualMachine: config.VirtualMachineServicesConfig{ + Execution: executionVMConfig, + Querying: queryVMConfig, + }, + }, + } + + expectedVMConfig := providedConfigs.GeneralConfig.VirtualMachine + expectedVMConfig.Execution.WasmVMVersions = expectedVMWasmVersionsConfig + expectedVMConfig.Querying.WasmVMVersions = expectedVMWasmVersionsConfig + + ApplyArchCustomConfigs(providedConfigs) + + assert.Equal(t, expectedVMConfig, providedConfigs.GeneralConfig.VirtualMachine) + }) + t.Run("empty config should return an altered config", func(t *testing.T) { + t.Parallel() + + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{}, + } + + expectedVMConfig := providedConfigs.GeneralConfig.VirtualMachine + expectedVMConfig.Execution.WasmVMVersions = expectedVMWasmVersionsConfig + expectedVMConfig.Querying.WasmVMVersions = expectedVMWasmVersionsConfig + + ApplyArchCustomConfigs(providedConfigs) + + expectedConfig := &config.Configs{ + GeneralConfig: &config.Config{ + VirtualMachine: expectedVMConfig, + }, + } + + assert.Equal(t, expectedConfig, providedConfigs) + }) +} 
diff --git a/node/customConfigsDefault.go b/node/customConfigsDefault.go new file mode 100644 index 00000000000..b762871db10 --- /dev/null +++ b/node/customConfigsDefault.go @@ -0,0 +1,14 @@ +//go:build !arm64 + +package node + +import ( + "runtime" + + "github.com/multiversx/mx-chain-go/config" +) + +// ApplyArchCustomConfigs will apply configuration tweaks based on the architecture the node is running on +func ApplyArchCustomConfigs(_ *config.Configs) { + log.Debug("ApplyArchCustomConfigs - nothing to do", "architecture", runtime.GOARCH) +} diff --git a/node/customConfigsDefault_test.go b/node/customConfigsDefault_test.go new file mode 100644 index 00000000000..705959b78cc --- /dev/null +++ b/node/customConfigsDefault_test.go @@ -0,0 +1,75 @@ +//go:build !arm64 + +package node + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/assert" +) + +func TestApplyArchCustomConfigs(t *testing.T) { + t.Parallel() + + executionVMConfig := config.VirtualMachineConfig{ + WasmVMVersions: []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.2", + }, + { + StartEpoch: 1, + Version: "v1.3", + }, + { + StartEpoch: 2, + Version: "v1.4", + }, + { + StartEpoch: 3, + Version: "v1.5", + }, + }, + TransferAndExecuteByUserAddresses: []string{"3132333435363738393031323334353637383930313233343536373839303234"}, + TimeOutForSCExecutionInMilliseconds: 1, + WasmerSIGSEGVPassthrough: true, + } + + queryVMConfig := config.QueryVirtualMachineConfig{ + VirtualMachineConfig: executionVMConfig, + NumConcurrentVMs: 15, + } + + t.Run("providing a configuration should not alter it", func(t *testing.T) { + t.Parallel() + + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{ + VirtualMachine: config.VirtualMachineServicesConfig{ + Execution: executionVMConfig, + Querying: queryVMConfig, + }, + }, + } + + ApplyArchCustomConfigs(providedConfigs) + + assert.Equal(t, executionVMConfig, providedConfigs.GeneralConfig.VirtualMachine.Execution) + assert.Equal(t, queryVMConfig, providedConfigs.GeneralConfig.VirtualMachine.Querying) + }) + t.Run("empty config should return an empty config", func(t *testing.T) { + t.Parallel() + + // this test will prevent adding new config changes without handling them in this test + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{}, + } + emptyConfigs := &config.Configs{ + GeneralConfig: &config.Config{}, + } + ApplyArchCustomConfigs(providedConfigs) + + assert.Equal(t, emptyConfigs, providedConfigs) + }) +} diff --git a/node/external/dtos.go b/node/external/dtos.go index f884d8d32c9..12a6b153c46 100644 --- a/node/external/dtos.go +++ b/node/external/dtos.go @@ -1,20 +1,24 @@ package external +import "github.com/multiversx/mx-chain-core-go/data/transaction" + // ArgsCreateTransaction defines arguments for creating a transaction type ArgsCreateTransaction struct { - Nonce uint64 - Value string - Receiver string - ReceiverUsername []byte - Sender string - SenderUsername []byte - GasPrice uint64 - GasLimit uint64 - DataField []byte - SignatureHex string - ChainID string - Version uint32 - Options uint32 - Guardian string - GuardianSigHex string + Nonce uint64 + Value string + Receiver string + ReceiverUsername []byte + Sender string + SenderUsername []byte + GasPrice uint64 + GasLimit uint64 + DataField []byte + SignatureHex string + ChainID string + Version uint32 + Options uint32 + Guardian string + GuardianSigHex string + Relayer string 
+ InnerTransactions []*transaction.Transaction } diff --git a/node/external/nodeApiResolver.go b/node/external/nodeApiResolver.go index d980e9ad91f..0ae0356f4f7 100644 --- a/node/external/nodeApiResolver.go +++ b/node/external/nodeApiResolver.go @@ -40,7 +40,9 @@ type ArgNodeApiResolver struct { AccountsParser genesis.AccountsParser GasScheduleNotifier common.GasScheduleNotifierAPI ManagedPeersMonitor common.ManagedPeersMonitor + PublicKey string NodesCoordinator nodesCoordinator.NodesCoordinator + StorageManagers []common.StorageManager } // nodeApiResolver can resolve API requests @@ -59,7 +61,9 @@ type nodeApiResolver struct { accountsParser genesis.AccountsParser gasScheduleNotifier common.GasScheduleNotifierAPI managedPeersMonitor common.ManagedPeersMonitor + publicKey string nodesCoordinator nodesCoordinator.NodesCoordinator + storageManagers []common.StorageManager } // NewNodeApiResolver creates a new nodeApiResolver instance @@ -125,7 +129,9 @@ func NewNodeApiResolver(arg ArgNodeApiResolver) (*nodeApiResolver, error) { accountsParser: arg.AccountsParser, gasScheduleNotifier: arg.GasScheduleNotifier, managedPeersMonitor: arg.ManagedPeersMonitor, + publicKey: arg.PublicKey, nodesCoordinator: arg.NodesCoordinator, + storageManagers: arg.StorageManagers, }, nil } @@ -151,6 +157,15 @@ func (nar *nodeApiResolver) SimulateTransactionExecution(tx *transaction.Transac // Close closes all underlying components func (nar *nodeApiResolver) Close() error { + for _, sm := range nar.storageManagers { + if check.IfNil(sm) { + continue + } + + err := sm.Close() + log.LogIfError(err) + } + return nar.scQueryService.Close() } @@ -345,12 +360,23 @@ func (nar *nodeApiResolver) GetManagedKeysCount() int { return nar.managedPeersMonitor.GetManagedKeysCount() } -// GetManagedKeys returns all keys managed by the current node when running in multikey mode +// GetManagedKeys returns all keys that should act as validator(main or backup that took over) and will be managed by this node func (nar *nodeApiResolver) GetManagedKeys() []string { managedKeys := nar.managedPeersMonitor.GetManagedKeys() return nar.parseKeys(managedKeys) } +// GetLoadedKeys returns all keys that were loaded by this node +func (nar *nodeApiResolver) GetLoadedKeys() []string { + loadedKeys := nar.managedPeersMonitor.GetLoadedKeys() + if len(loadedKeys) > 0 { + return nar.parseKeys(loadedKeys) + } + + // node is in single key mode, returning the main public key + return []string{nar.publicKey} +} + // GetEligibleManagedKeys returns the eligible managed keys when node is running in multikey mode func (nar *nodeApiResolver) GetEligibleManagedKeys() ([]string, error) { eligibleKeys, err := nar.managedPeersMonitor.GetEligibleManagedKeys() diff --git a/node/external/nodeApiResolver_test.go b/node/external/nodeApiResolver_test.go index 9e1d0ee516d..5a1cec19787 100644 --- a/node/external/nodeApiResolver_test.go +++ b/node/external/nodeApiResolver_test.go @@ -39,7 +39,7 @@ func createMockArgs() external.ArgNodeApiResolver { APIBlockHandler: &mock.BlockAPIHandlerStub{}, APITransactionHandler: &mock.TransactionAPIHandlerStub{}, APIInternalBlockHandler: &mock.InternalBlockApiHandlerStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, AccountsParser: &genesisMocks.AccountsParserStub{}, GasScheduleNotifier: &testscommon.GasScheduleNotifierMock{}, @@ -594,7 +594,7 @@ func TestNodeApiResolver_GetGenesisNodesPubKeys(t 
*testing.T) { } arg := createMockArgs() - arg.GenesisNodesSetupHandler = &testscommon.NodesSetupStub{ + arg.GenesisNodesSetupHandler = &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return eligible, waiting }, @@ -739,6 +739,59 @@ func TestNodeApiResolver_GetManagedKeys(t *testing.T) { require.Equal(t, expectedKeys, keys) } +func TestNodeApiResolver_GetLoadedKeys(t *testing.T) { + t.Parallel() + + t.Run("multikey should work", func(t *testing.T) { + t.Parallel() + + providedKeys := [][]byte{ + []byte("pk1"), + []byte("pk2"), + } + expectedKeys := []string{ + "pk1", + "pk2", + } + args := createMockArgs() + args.ManagedPeersMonitor = &testscommon.ManagedPeersMonitorStub{ + GetLoadedKeysCalled: func() [][]byte { + return providedKeys + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + return string(pkBytes) + }, + } + nar, err := external.NewNodeApiResolver(args) + require.NoError(t, err) + + keys := nar.GetLoadedKeys() + require.Equal(t, expectedKeys, keys) + }) + t.Run("single key should work", func(t *testing.T) { + t.Parallel() + + providedKey := "pk1" + expectedKeys := []string{ + "pk1", + } + args := createMockArgs() + args.PublicKey = providedKey + args.ManagedPeersMonitor = &testscommon.ManagedPeersMonitorStub{ + GetLoadedKeysCalled: func() [][]byte { + return [][]byte{} + }, + } + nar, err := external.NewNodeApiResolver(args) + require.NoError(t, err) + + keys := nar.GetLoadedKeys() + require.Equal(t, expectedKeys, keys) + }) +} + func TestNodeApiResolver_GetEligibleManagedKeys(t *testing.T) { t.Parallel() diff --git a/node/external/timemachine/fee/feeComputer_test.go b/node/external/timemachine/fee/feeComputer_test.go index faf1996940e..46e2904d6d2 100644 --- a/node/external/timemachine/fee/feeComputer_test.go +++ b/node/external/timemachine/fee/feeComputer_test.go @@ -21,8 +21,7 @@ import ( func createEconomicsData() process.EconomicsDataHandler { economicsConfig := testscommon.GetEconomicsConfig() economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - Economics: &economicsConfig, + Economics: &economicsConfig, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { if flag == common.PenalizedTooMuchGasFlag { diff --git a/node/external/timemachine/fee/memoryFootprint/memory_test.go b/node/external/timemachine/fee/memoryFootprint/memory_test.go index 2f32427e4de..a854a286ddd 100644 --- a/node/external/timemachine/fee/memoryFootprint/memory_test.go +++ b/node/external/timemachine/fee/memoryFootprint/memory_test.go @@ -30,8 +30,7 @@ func TestFeeComputer_MemoryFootprint(t *testing.T) { economicsConfig := testscommon.GetEconomicsConfig() economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - Economics: &economicsConfig, + Economics: &economicsConfig, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { if flag == common.PenalizedTooMuchGasFlag { diff --git a/node/external/transactionAPI/apiTransactionProcessor.go b/node/external/transactionAPI/apiTransactionProcessor.go index 
404cc8eba8d..b12aa9ac86f 100644 --- a/node/external/transactionAPI/apiTransactionProcessor.go +++ b/node/external/transactionAPI/apiTransactionProcessor.go @@ -8,6 +8,7 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" rewardTxData "github.com/multiversx/mx-chain-core-go/data/rewardTx" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" @@ -308,41 +309,43 @@ func (atp *apiTransactionProcessor) getUnsignedTransactionsFromPool(requestedFie } func (atp *apiTransactionProcessor) extractRequestedTxInfo(wrappedTx *txcache.WrappedTransaction, requestedFieldsHandler fieldsHandler) common.Transaction { + fieldGetters := atp.getFieldGettersForTx(wrappedTx) tx := common.Transaction{ TxFields: make(map[string]interface{}), } - tx.TxFields[hashField] = hex.EncodeToString(wrappedTx.TxHash) - - if requestedFieldsHandler.HasNonce { - tx.TxFields[nonceField] = wrappedTx.Tx.GetNonce() + for field, value := range fieldGetters { + if requestedFieldsHandler.IsFieldSet(field) { + tx.TxFields[field] = value + } } - if requestedFieldsHandler.HasSender { - tx.TxFields[senderField], _ = atp.addressPubKeyConverter.Encode(wrappedTx.Tx.GetSndAddr()) - } + return tx +} - if requestedFieldsHandler.HasReceiver { - tx.TxFields[receiverField], _ = atp.addressPubKeyConverter.Encode(wrappedTx.Tx.GetRcvAddr()) +func (atp *apiTransactionProcessor) getFieldGettersForTx(wrappedTx *txcache.WrappedTransaction) map[string]interface{} { + var fieldGetters = map[string]interface{}{ + hashField: hex.EncodeToString(wrappedTx.TxHash), + nonceField: wrappedTx.Tx.GetNonce(), + senderField: atp.addressPubKeyConverter.SilentEncode(wrappedTx.Tx.GetSndAddr(), log), + receiverField: atp.addressPubKeyConverter.SilentEncode(wrappedTx.Tx.GetRcvAddr(), log), + gasLimitField: wrappedTx.Tx.GetGasLimit(), + gasPriceField: wrappedTx.Tx.GetGasPrice(), + rcvUsernameField: wrappedTx.Tx.GetRcvUserName(), + dataField: wrappedTx.Tx.GetData(), + valueField: getTxValue(wrappedTx), + senderShardID: wrappedTx.SenderShardID, + receiverShardID: wrappedTx.ReceiverShardID, } - if requestedFieldsHandler.HasGasLimit { - tx.TxFields[gasLimitField] = wrappedTx.Tx.GetGasLimit() - } - if requestedFieldsHandler.HasGasPrice { - tx.TxFields[gasPriceField] = wrappedTx.Tx.GetGasPrice() - } - if requestedFieldsHandler.HasRcvUsername { - tx.TxFields[rcvUsernameField] = wrappedTx.Tx.GetRcvUserName() - } - if requestedFieldsHandler.HasData { - tx.TxFields[dataField] = wrappedTx.Tx.GetData() - } - if requestedFieldsHandler.HasValue { - tx.TxFields[valueField] = getTxValue(wrappedTx) + guardedTx, isGuardedTx := wrappedTx.Tx.(data.GuardedTransactionHandler) + if isGuardedTx { + fieldGetters[signatureField] = hex.EncodeToString(guardedTx.GetSignature()) + fieldGetters[guardianField] = atp.addressPubKeyConverter.SilentEncode(guardedTx.GetGuardianAddr(), log) + fieldGetters[guardianSignatureField] = hex.EncodeToString(guardedTx.GetGuardianSignature()) } - return tx + return fieldGetters } func (atp *apiTransactionProcessor) fetchTxsForSender(sender string, senderShard uint32) []*txcache.WrappedTransaction { diff --git a/node/external/transactionAPI/apiTransactionProcessor_test.go b/node/external/transactionAPI/apiTransactionProcessor_test.go index f7d90c8f15b..7d86a1610c5 100644 --- a/node/external/transactionAPI/apiTransactionProcessor_test.go +++ 
b/node/external/transactionAPI/apiTransactionProcessor_test.go @@ -825,7 +825,7 @@ func TestApiTransactionProcessor_GetTransactionsPoolForSender(t *testing.T) { require.NoError(t, err) require.NotNil(t, atp) - res, err := atp.GetTransactionsPoolForSender(sender, "sender,value") + res, err := atp.GetTransactionsPoolForSender(sender, "*") require.NoError(t, err) expectedHashes := []string{hex.EncodeToString(txHash0), hex.EncodeToString(txHash1), hex.EncodeToString(txHash2), hex.EncodeToString(txHash3), hex.EncodeToString(txHash4)} expectedValues := []string{"100001", "100002", "100003", "100004", "100005"} diff --git a/node/external/transactionAPI/fieldsHandler.go b/node/external/transactionAPI/fieldsHandler.go index 43ea27d473a..4f837968cb7 100644 --- a/node/external/transactionAPI/fieldsHandler.go +++ b/node/external/transactionAPI/fieldsHandler.go @@ -5,39 +5,60 @@ import ( ) const ( - hashField = "hash" - nonceField = "nonce" - senderField = "sender" - receiverField = "receiver" - gasLimitField = "gaslimit" - gasPriceField = "gasprice" - rcvUsernameField = "receiverusername" - dataField = "data" - valueField = "value" + hashField = "hash" + nonceField = "nonce" + senderField = "sender" + receiverField = "receiver" + gasLimitField = "gaslimit" + gasPriceField = "gasprice" + rcvUsernameField = "receiverusername" + dataField = "data" + valueField = "value" + signatureField = "signature" + guardianField = "guardian" + guardianSignatureField = "guardiansignature" + senderShardID = "sendershard" + receiverShardID = "receivershard" + wildCard = "*" + + separator = "," ) type fieldsHandler struct { - HasNonce bool - HasSender bool - HasReceiver bool - HasGasLimit bool - HasGasPrice bool - HasRcvUsername bool - HasData bool - HasValue bool + fieldsMap map[string]struct{} } func newFieldsHandler(parameters string) fieldsHandler { + if len(parameters) == 0 { + return fieldsHandler{ + fieldsMap: map[string]struct{}{ + hashField: {}, // hash should always be returned + }, + } + } + parameters = strings.ToLower(parameters) - ph := fieldsHandler{ - HasNonce: strings.Contains(parameters, nonceField), - HasSender: strings.Contains(parameters, senderField), - HasReceiver: strings.Contains(parameters, receiverField), - HasGasLimit: strings.Contains(parameters, gasLimitField), - HasGasPrice: strings.Contains(parameters, gasPriceField), - HasRcvUsername: strings.Contains(parameters, rcvUsernameField), - HasData: strings.Contains(parameters, dataField), - HasValue: strings.Contains(parameters, valueField), + return fieldsHandler{ + fieldsMap: sliceToMap(strings.Split(parameters, separator)), + } +} + +// IsFieldSet returns true if the provided field is set +func (handler *fieldsHandler) IsFieldSet(field string) bool { + _, hasWildCard := handler.fieldsMap[wildCard] + if hasWildCard { + return true } - return ph + + _, has := handler.fieldsMap[strings.ToLower(field)] + return has +} + +func sliceToMap(providedSlice []string) map[string]struct{} { + result := make(map[string]struct{}, len(providedSlice)) + for _, entry := range providedSlice { + result[entry] = struct{}{} + } + + return result } diff --git a/node/external/transactionAPI/fieldsHandler_test.go b/node/external/transactionAPI/fieldsHandler_test.go index 0948483fd11..fab3b3a41d9 100644 --- a/node/external/transactionAPI/fieldsHandler_test.go +++ b/node/external/transactionAPI/fieldsHandler_test.go @@ -1,6 +1,8 @@ package transactionAPI import ( + "fmt" + "strings" "testing" "github.com/stretchr/testify/require" @@ -10,18 +12,17 @@ 
func Test_newFieldsHandler(t *testing.T) { t.Parallel() fh := newFieldsHandler("") - require.Equal(t, fieldsHandler{}, fh) + require.Equal(t, fieldsHandler{map[string]struct{}{hashField: {}}}, fh) - fh = newFieldsHandler("nOnCe,sender,receiver,gasLimit,GASprice,receiverusername,data,value") - expectedPH := fieldsHandler{ - HasNonce: true, - HasSender: true, - HasReceiver: true, - HasGasLimit: true, - HasGasPrice: true, - HasRcvUsername: true, - HasData: true, - HasValue: true, + providedFields := "nOnCe,sender,receiver,gasLimit,GASprice,receiverusername,data,value,signature,guardian,guardiansignature,sendershard,receivershard" + splitFields := strings.Split(providedFields, separator) + fh = newFieldsHandler(providedFields) + for _, field := range splitFields { + require.True(t, fh.IsFieldSet(field), fmt.Sprintf("field %s is not set", field)) + } + + fh = newFieldsHandler("*") + for _, field := range splitFields { + require.True(t, fh.IsFieldSet(field)) } - require.Equal(t, expectedPH, fh) } diff --git a/node/external/transactionAPI/gasUsedAndFeeProcessor.go b/node/external/transactionAPI/gasUsedAndFeeProcessor.go index a22b689d6a4..f0036bc136b 100644 --- a/node/external/transactionAPI/gasUsedAndFeeProcessor.go +++ b/node/external/transactionAPI/gasUsedAndFeeProcessor.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/transaction" + datafield "github.com/multiversx/mx-chain-vm-common-go/parsers/dataField" ) type gasUsedAndFeeProcessor struct { @@ -52,7 +53,7 @@ func (gfp *gasUsedAndFeeProcessor) prepareTxWithResultsBasedOnLogs( tx *transaction.ApiTransactionResult, hasRefund bool, ) { - if tx.Logs == nil { + if tx.Logs == nil || (tx.Function == "" && tx.Operation == datafield.OperationTransfer) { return } diff --git a/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go b/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go index 5c0ba4d4c05..99541bfef5d 100644 --- a/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go +++ b/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go @@ -20,11 +20,10 @@ import ( func createEconomicsData(enableEpochsHandler common.EnableEpochsHandler) process.EconomicsDataHandler { economicsConfig := testscommon.GetEconomicsConfig() economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - Economics: &economicsConfig, - EnableEpochsHandler: enableEpochsHandler, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, + Economics: &economicsConfig, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, }) return economicsData diff --git a/node/external/transactionAPI/unmarshaller.go b/node/external/transactionAPI/unmarshaller.go index c9526217f4f..bc997cdf042 100644 --- a/node/external/transactionAPI/unmarshaller.go +++ b/node/external/transactionAPI/unmarshaller.go @@ -13,6 +13,8 @@ import ( "github.com/multiversx/mx-chain-go/sharding" ) +const operationTransfer = "transfer" + type txUnmarshaller struct { shardCoordinator sharding.Coordinator addressPubKeyConverter core.PubkeyConverter @@ -86,8 +88,25 @@ func (tu *txUnmarshaller) unmarshalTransaction(txBytes []byte, txType transactio } apiTx = tu.prepareUnsignedTx(&tx) } - if err != nil { - return nil, err + + 
isRelayedV3 := len(apiTx.InnerTransactions) > 0 + if isRelayedV3 { + apiTx.Operation = operationTransfer + for _, innerTx := range apiTx.InnerTransactions { + apiTx.Receivers = append(apiTx.Receivers, innerTx.Receiver) + + rcvBytes, errDecode := tu.addressPubKeyConverter.Decode(innerTx.Receiver) + if errDecode != nil { + log.Warn("bech32PubkeyConverter.Decode() failed while decoding innerTx.Receiver", "error", errDecode) + continue + } + + apiTx.ReceiversShardIDs = append(apiTx.ReceiversShardIDs, tu.shardCoordinator.ComputeId(rcvBytes)) + } + + apiTx.IsRelayed = true + + return apiTx, nil } res := tu.dataFieldParser.Parse(apiTx.Data, apiTx.Tx.GetSndAddr(), apiTx.Tx.GetRcvAddr(), tu.shardCoordinator.NumberOfShards()) @@ -111,21 +130,22 @@ func (tu *txUnmarshaller) prepareNormalTx(tx *transaction.Transaction) *transact senderAddress := tu.addressPubKeyConverter.SilentEncode(tx.SndAddr, log) apiTx := &transaction.ApiTransactionResult{ - Tx: tx, - Type: string(transaction.TxTypeNormal), - Nonce: tx.Nonce, - Value: tx.Value.String(), - Receiver: receiverAddress, - ReceiverUsername: tx.RcvUserName, - Sender: senderAddress, - SenderUsername: tx.SndUserName, - GasPrice: tx.GasPrice, - GasLimit: tx.GasLimit, - Data: tx.Data, - Signature: hex.EncodeToString(tx.Signature), - Options: tx.Options, - Version: tx.Version, - ChainID: string(tx.ChainID), + Tx: tx, + Type: string(transaction.TxTypeNormal), + Nonce: tx.Nonce, + Value: tx.Value.String(), + Receiver: receiverAddress, + ReceiverUsername: tx.RcvUserName, + Sender: senderAddress, + SenderUsername: tx.SndUserName, + GasPrice: tx.GasPrice, + GasLimit: tx.GasLimit, + Data: tx.Data, + Signature: hex.EncodeToString(tx.Signature), + Options: tx.Options, + Version: tx.Version, + ChainID: string(tx.ChainID), + InnerTransactions: tu.prepareInnerTxs(tx), } if len(tx.GuardianAddr) > 0 { @@ -133,29 +153,72 @@ func (tu *txUnmarshaller) prepareNormalTx(tx *transaction.Transaction) *transact apiTx.GuardianSignature = hex.EncodeToString(tx.GuardianSignature) } + if len(tx.RelayerAddr) > 0 { + apiTx.RelayerAddress = tu.addressPubKeyConverter.SilentEncode(tx.RelayerAddr, log) + } + return apiTx } +func (tu *txUnmarshaller) prepareInnerTxs(tx *transaction.Transaction) []*transaction.FrontendTransaction { + if len(tx.InnerTransactions) == 0 { + return nil + } + + innerTxs := make([]*transaction.FrontendTransaction, 0, len(tx.InnerTransactions)) + for _, innerTx := range tx.InnerTransactions { + frontEndTx := &transaction.FrontendTransaction{ + Nonce: innerTx.Nonce, + Value: innerTx.Value.String(), + Receiver: tu.addressPubKeyConverter.SilentEncode(innerTx.RcvAddr, log), + Sender: tu.addressPubKeyConverter.SilentEncode(innerTx.SndAddr, log), + SenderUsername: innerTx.SndUserName, + ReceiverUsername: innerTx.RcvUserName, + GasPrice: innerTx.GasPrice, + GasLimit: innerTx.GasLimit, + Data: innerTx.Data, + Signature: hex.EncodeToString(innerTx.Signature), + ChainID: string(innerTx.ChainID), + Version: innerTx.Version, + Options: innerTx.Options, + } + + if len(innerTx.GuardianAddr) > 0 { + frontEndTx.GuardianAddr = tu.addressPubKeyConverter.SilentEncode(innerTx.GuardianAddr, log) + frontEndTx.GuardianSignature = hex.EncodeToString(innerTx.GuardianSignature) + } + + if len(innerTx.RelayerAddr) > 0 { + frontEndTx.Relayer = tu.addressPubKeyConverter.SilentEncode(innerTx.RelayerAddr, log) + } + + innerTxs = append(innerTxs, frontEndTx) + } + + return innerTxs +} + func (tu *txUnmarshaller) prepareInvalidTx(tx *transaction.Transaction) *transaction.ApiTransactionResult { 
receiverAddress := tu.addressPubKeyConverter.SilentEncode(tx.RcvAddr, log) senderAddress := tu.addressPubKeyConverter.SilentEncode(tx.SndAddr, log) apiTx := &transaction.ApiTransactionResult{ - Tx: tx, - Type: string(transaction.TxTypeInvalid), - Nonce: tx.Nonce, - Value: tx.Value.String(), - Receiver: receiverAddress, - ReceiverUsername: tx.RcvUserName, - Sender: senderAddress, - SenderUsername: tx.SndUserName, - GasPrice: tx.GasPrice, - GasLimit: tx.GasLimit, - Data: tx.Data, - Signature: hex.EncodeToString(tx.Signature), - Options: tx.Options, - Version: tx.Version, - ChainID: string(tx.ChainID), + Tx: tx, + Type: string(transaction.TxTypeInvalid), + Nonce: tx.Nonce, + Value: tx.Value.String(), + Receiver: receiverAddress, + ReceiverUsername: tx.RcvUserName, + Sender: senderAddress, + SenderUsername: tx.SndUserName, + GasPrice: tx.GasPrice, + GasLimit: tx.GasLimit, + Data: tx.Data, + Signature: hex.EncodeToString(tx.Signature), + Options: tx.Options, + Version: tx.Version, + ChainID: string(tx.ChainID), + InnerTransactions: tu.prepareInnerTxs(tx), } if len(tx.GuardianAddr) > 0 { @@ -163,6 +226,10 @@ func (tu *txUnmarshaller) prepareInvalidTx(tx *transaction.Transaction) *transac apiTx.GuardianSignature = hex.EncodeToString(tx.GuardianSignature) } + if len(tx.RelayerAddr) > 0 { + apiTx.RelayerAddress = tu.addressPubKeyConverter.SilentEncode(tx.RelayerAddr, log) + } + return apiTx } diff --git a/node/metrics/metrics.go b/node/metrics/metrics.go index 3b1151f61af..c380c08b95d 100644 --- a/node/metrics/metrics.go +++ b/node/metrics/metrics.go @@ -30,6 +30,7 @@ func InitBaseMetrics(appStatusHandler core.AppStatusHandler) error { appStatusHandler.SetUInt64Value(common.MetricSynchronizedRound, initUint) appStatusHandler.SetUInt64Value(common.MetricNonce, initUint) + appStatusHandler.SetUInt64Value(common.MetricBlockTimestamp, initUint) appStatusHandler.SetUInt64Value(common.MetricCountConsensus, initUint) appStatusHandler.SetUInt64Value(common.MetricCountLeader, initUint) appStatusHandler.SetUInt64Value(common.MetricCountAcceptedBlocks, initUint) @@ -53,6 +54,8 @@ func InitBaseMetrics(appStatusHandler core.AppStatusHandler) error { appStatusHandler.SetUInt64Value(common.MetricTrieSyncNumReceivedBytes, initUint) appStatusHandler.SetUInt64Value(common.MetricAccountsSnapshotInProgress, initUint) appStatusHandler.SetUInt64Value(common.MetricPeersSnapshotInProgress, initUint) + appStatusHandler.SetUInt64Value(common.MetricNonceAtEpochStart, initUint) + appStatusHandler.SetUInt64Value(common.MetricRoundAtEpochStart, initUint) appStatusHandler.SetInt64Value(common.MetricLastAccountsSnapshotDurationSec, initInt) appStatusHandler.SetInt64Value(common.MetricLastPeersSnapshotDurationSec, initInt) @@ -92,6 +95,7 @@ func InitConfigMetrics( enableEpochs := epochConfig.EnableEpochs + // enable epochs metrics appStatusHandler.SetUInt64Value(common.MetricScDeployEnableEpoch, uint64(enableEpochs.SCDeployEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricBuiltInFunctionsEnableEpoch, uint64(enableEpochs.BuiltInFunctionsEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricRelayedTransactionsEnableEpoch, uint64(enableEpochs.RelayedTransactionsEnableEpoch)) @@ -117,19 +121,87 @@ func InitConfigMetrics( appStatusHandler.SetUInt64Value(common.MetricReturnDataToLastTransferEnableEpoch, uint64(enableEpochs.ReturnDataToLastTransferEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricSenderInOutTransferEnableEpoch, uint64(enableEpochs.SenderInOutTransferEnableEpoch)) 
appStatusHandler.SetUInt64Value(common.MetricRelayedTransactionsV2EnableEpoch, uint64(enableEpochs.RelayedTransactionsV2EnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricRelayedTransactionsV3EnableEpoch, uint64(enableEpochs.RelayedTransactionsV3EnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricFixRelayedBaseCostEnableEpoch, uint64(enableEpochs.FixRelayedBaseCostEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricUnbondTokensV2EnableEpoch, uint64(enableEpochs.UnbondTokensV2EnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricSaveJailedAlwaysEnableEpoch, uint64(enableEpochs.SaveJailedAlwaysEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricValidatorToDelegationEnableEpoch, uint64(enableEpochs.ValidatorToDelegationEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricReDelegateBelowMinCheckEnableEpoch, uint64(enableEpochs.ReDelegateBelowMinCheckEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricIncrementSCRNonceInMultiTransferEnableEpoch, uint64(enableEpochs.IncrementSCRNonceInMultiTransferEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricScheduledMiniBlocksEnableEpoch, uint64(enableEpochs.ScheduledMiniBlocksEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricESDTMultiTransferEnableEpoch, uint64(enableEpochs.ESDTMultiTransferEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricGlobalMintBurnDisableEpoch, uint64(enableEpochs.GlobalMintBurnDisableEpoch)) appStatusHandler.SetUInt64Value(common.MetricESDTTransferRoleEnableEpoch, uint64(enableEpochs.ESDTTransferRoleEnableEpoch)) - appStatusHandler.SetUInt64Value(common.MetricBuiltInFunctionOnMetaEnableEpoch, uint64(enableEpochs.BuiltInFunctionOnMetaEnableEpoch)) - appStatusHandler.SetStringValue(common.MetricTotalSupply, economicsConfig.GlobalSettings.GenesisTotalSupply) - appStatusHandler.SetUInt64Value(common.MetricWaitingListFixEnableEpoch, uint64(enableEpochs.WaitingListFixEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricComputeRewardCheckpointEnableEpoch, uint64(enableEpochs.ComputeRewardCheckpointEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricSCRSizeInvariantCheckEnableEpoch, uint64(enableEpochs.SCRSizeInvariantCheckEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricBackwardCompSaveKeyValueEnableEpoch, uint64(enableEpochs.BackwardCompSaveKeyValueEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricESDTNFTCreateOnMultiShardEnableEpoch, uint64(enableEpochs.ESDTNFTCreateOnMultiShardEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricMetaESDTSetEnableEpoch, uint64(enableEpochs.MetaESDTSetEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricAddTokensToDelegationEnableEpoch, uint64(enableEpochs.AddTokensToDelegationEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricMultiESDTTransferFixOnCallBackOnEnableEpoch, uint64(enableEpochs.MultiESDTTransferFixOnCallBackOnEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricOptimizeGasUsedInCrossMiniBlocksEnableEpoch, uint64(enableEpochs.OptimizeGasUsedInCrossMiniBlocksEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricCorrectFirstQueuedEpoch, uint64(enableEpochs.CorrectFirstQueuedEpoch)) + appStatusHandler.SetUInt64Value(common.MetricCorrectJailedNotUnstakedEmptyQueueEpoch, uint64(enableEpochs.CorrectJailedNotUnstakedEmptyQueueEpoch)) + appStatusHandler.SetUInt64Value(common.MetricFixOOGReturnCodeEnableEpoch, uint64(enableEpochs.FixOOGReturnCodeEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricRemoveNonUpdatedStorageEnableEpoch, 
uint64(enableEpochs.RemoveNonUpdatedStorageEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricDeleteDelegatorAfterClaimRewardsEnableEpoch, uint64(enableEpochs.DeleteDelegatorAfterClaimRewardsEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricOptimizeNFTStoreEnableEpoch, uint64(enableEpochs.OptimizeNFTStoreEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricCreateNFTThroughExecByCallerEnableEpoch, uint64(enableEpochs.CreateNFTThroughExecByCallerEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricStopDecreasingValidatorRatingWhenStuckEnableEpoch, uint64(enableEpochs.StopDecreasingValidatorRatingWhenStuckEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricFrontRunningProtectionEnableEpoch, uint64(enableEpochs.FrontRunningProtectionEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricIsPayableBySCEnableEpoch, uint64(enableEpochs.IsPayableBySCEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricStorageAPICostOptimizationEnableEpoch, uint64(enableEpochs.StorageAPICostOptimizationEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricTransformToMultiShardCreateEnableEpoch, uint64(enableEpochs.TransformToMultiShardCreateEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricESDTRegisterAndSetAllRolesEnableEpoch, uint64(enableEpochs.ESDTRegisterAndSetAllRolesEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricDoNotReturnOldBlockInBlockchainHookEnableEpoch, uint64(enableEpochs.DoNotReturnOldBlockInBlockchainHookEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricAddFailedRelayedTxToInvalidMBsDisableEpoch, uint64(enableEpochs.AddFailedRelayedTxToInvalidMBsDisableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricSCRSizeInvariantOnBuiltInResultEnableEpoch, uint64(enableEpochs.SCRSizeInvariantOnBuiltInResultEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricCheckCorrectTokenIDForTransferRoleEnableEpoch, uint64(enableEpochs.CheckCorrectTokenIDForTransferRoleEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricDisableExecByCallerEnableEpoch, uint64(enableEpochs.DisableExecByCallerEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricFailExecutionOnEveryAPIErrorEnableEpoch, uint64(enableEpochs.FailExecutionOnEveryAPIErrorEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricManagedCryptoAPIsEnableEpoch, uint64(enableEpochs.ManagedCryptoAPIsEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricRefactorContextEnableEpoch, uint64(enableEpochs.RefactorContextEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricCheckFunctionArgumentEnableEpoch, uint64(enableEpochs.CheckFunctionArgumentEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricCheckExecuteOnReadOnlyEnableEpoch, uint64(enableEpochs.CheckExecuteOnReadOnlyEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricMiniBlockPartialExecutionEnableEpoch, uint64(enableEpochs.MiniBlockPartialExecutionEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricESDTMetadataContinuousCleanupEnableEpoch, uint64(enableEpochs.ESDTMetadataContinuousCleanupEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricFixAsyncCallBackArgsListEnableEpoch, uint64(enableEpochs.FixAsyncCallBackArgsListEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricFixOldTokenLiquidityEnableEpoch, uint64(enableEpochs.FixOldTokenLiquidityEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricRuntimeMemStoreLimitEnableEpoch, uint64(enableEpochs.RuntimeMemStoreLimitEnableEpoch)) + 
appStatusHandler.SetUInt64Value(common.MetricRuntimeCodeSizeFixEnableEpoch, uint64(enableEpochs.RuntimeCodeSizeFixEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricSetSenderInEeiOutputTransferEnableEpoch, uint64(enableEpochs.SetSenderInEeiOutputTransferEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricRefactorPeersMiniBlocksEnableEpoch, uint64(enableEpochs.RefactorPeersMiniBlocksEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricSCProcessorV2EnableEpoch, uint64(enableEpochs.SCProcessorV2EnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricMaxBlockchainHookCountersEnableEpoch, uint64(enableEpochs.MaxBlockchainHookCountersEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricWipeSingleNFTLiquidityDecreaseEnableEpoch, uint64(enableEpochs.WipeSingleNFTLiquidityDecreaseEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricAlwaysSaveTokenMetaDataEnableEpoch, uint64(enableEpochs.AlwaysSaveTokenMetaDataEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricCleanUpInformativeSCRsEnableEpoch, uint64(enableEpochs.CleanUpInformativeSCRsEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricSetGuardianEnableEpoch, uint64(enableEpochs.SetGuardianEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricSetScToScLogEventEnableEpoch, uint64(enableEpochs.ScToScLogEventEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricRelayedNonceFixEnableEpoch, uint64(enableEpochs.RelayedNonceFixEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricDeterministicSortOnValidatorsInfoEnableEpoch, uint64(enableEpochs.DeterministicSortOnValidatorsInfoEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricKeepExecOrderOnCreatedSCRsEnableEpoch, uint64(enableEpochs.KeepExecOrderOnCreatedSCRsEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricMultiClaimOnDelegationEnableEpoch, uint64(enableEpochs.MultiClaimOnDelegationEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricChangeUsernameEnableEpoch, uint64(enableEpochs.ChangeUsernameEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricAutoBalanceDataTriesEnableEpoch, uint64(enableEpochs.AutoBalanceDataTriesEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricMigrateDataTrieEnableEpoch, uint64(enableEpochs.MigrateDataTrieEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricConsistentTokensValuesLengthCheckEnableEpoch, uint64(enableEpochs.ConsistentTokensValuesLengthCheckEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricFixDelegationChangeOwnerOnAccountEnableEpoch, uint64(enableEpochs.FixDelegationChangeOwnerOnAccountEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricDynamicGasCostForDataTrieStorageLoadEnableEpoch, uint64(enableEpochs.DynamicGasCostForDataTrieStorageLoadEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricNFTStopCreateEnableEpoch, uint64(enableEpochs.NFTStopCreateEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricChangeOwnerAddressCrossShardThroughSCEnableEpoch, uint64(enableEpochs.ChangeOwnerAddressCrossShardThroughSCEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricFixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch, uint64(enableEpochs.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricCurrentRandomnessOnSortingEnableEpoch, uint64(enableEpochs.CurrentRandomnessOnSortingEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricStakeLimitsEnableEpoch, uint64(enableEpochs.StakeLimitsEnableEpoch)) + 
appStatusHandler.SetUInt64Value(common.MetricStakingV4Step1EnableEpoch, uint64(enableEpochs.StakingV4Step1EnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricStakingV4Step2EnableEpoch, uint64(enableEpochs.StakingV4Step2EnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricStakingV4Step3EnableEpoch, uint64(enableEpochs.StakingV4Step3EnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricCleanupAuctionOnLowWaitingListEnableEpoch, uint64(enableEpochs.CleanupAuctionOnLowWaitingListEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricAlwaysMergeContextsInEEIEnableEpoch, uint64(enableEpochs.AlwaysMergeContextsInEEIEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricDynamicESDTEnableEpoch, uint64(enableEpochs.DynamicESDTEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricEGLDInMultiTransferEnableEpoch, uint64(enableEpochs.EGLDInMultiTransferEnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricCryptoOpcodesV2EnableEpoch, uint64(enableEpochs.CryptoOpcodesV2EnableEpoch)) + appStatusHandler.SetUInt64Value(common.MetricMultiESDTNFTTransferAndExecuteByUserEnableEpoch, uint64(enableEpochs.MultiESDTNFTTransferAndExecuteByUserEnableEpoch)) for i, nodesChangeConfig := range enableEpochs.MaxNodesChangeEnableEpoch { epochEnable := fmt.Sprintf("%s%d%s", common.MetricMaxNodesChangeEnableEpoch, i, common.EpochEnableSuffix) @@ -146,6 +218,7 @@ func InitConfigMetrics( appStatusHandler.SetStringValue(common.MetricHysteresis, fmt.Sprintf("%f", genesisNodesConfig.GetHysteresis())) appStatusHandler.SetStringValue(common.MetricAdaptivity, fmt.Sprintf("%t", genesisNodesConfig.GetAdaptivity())) appStatusHandler.SetStringValue(common.MetricGatewayMetricsEndpoint, gatewayMetricsConfig.URL) + appStatusHandler.SetStringValue(common.MetricTotalSupply, economicsConfig.GlobalSettings.GenesisTotalSupply) return nil } diff --git a/node/metrics/metrics_test.go b/node/metrics/metrics_test.go index 0a0e3e57cc7..395d42afc15 100644 --- a/node/metrics/metrics_test.go +++ b/node/metrics/metrics_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" @@ -22,6 +23,7 @@ func TestInitBaseMetrics(t *testing.T) { expectedKeys := []string{ common.MetricSynchronizedRound, common.MetricNonce, + common.MetricBlockTimestamp, common.MetricCountConsensus, common.MetricCountLeader, common.MetricCountAcceptedBlocks, @@ -63,6 +65,8 @@ func TestInitBaseMetrics(t *testing.T) { common.MetricAccountsSnapshotNumNodes, common.MetricTrieSyncNumProcessedNodes, common.MetricTrieSyncNumReceivedBytes, + common.MetricRoundAtEpochStart, + common.MetricNonceAtEpochStart, } keys := make(map[string]struct{}) @@ -101,43 +105,112 @@ func TestInitConfigMetrics(t *testing.T) { cfg := config.EpochConfig{ EnableEpochs: config.EnableEpochs{ - SCDeployEnableEpoch: 1, - BuiltInFunctionsEnableEpoch: 2, - RelayedTransactionsEnableEpoch: 3, - PenalizedTooMuchGasEnableEpoch: 4, - SwitchJailWaitingEnableEpoch: 5, - SwitchHysteresisForMinNodesEnableEpoch: 6, - BelowSignedThresholdEnableEpoch: 7, - TransactionSignedWithTxHashEnableEpoch: 8, - 
MetaProtectionEnableEpoch: 9, - AheadOfTimeGasUsageEnableEpoch: 10, - GasPriceModifierEnableEpoch: 11, - RepairCallbackEnableEpoch: 12, - BlockGasAndFeesReCheckEnableEpoch: 13, - StakingV2EnableEpoch: 14, - StakeEnableEpoch: 15, - DoubleKeyProtectionEnableEpoch: 16, - ESDTEnableEpoch: 17, - GovernanceEnableEpoch: 18, - DelegationManagerEnableEpoch: 19, - DelegationSmartContractEnableEpoch: 20, - CorrectLastUnjailedEnableEpoch: 21, - BalanceWaitingListsEnableEpoch: 22, - ReturnDataToLastTransferEnableEpoch: 23, - SenderInOutTransferEnableEpoch: 24, - RelayedTransactionsV2EnableEpoch: 25, - UnbondTokensV2EnableEpoch: 26, - SaveJailedAlwaysEnableEpoch: 27, - ValidatorToDelegationEnableEpoch: 28, - ReDelegateBelowMinCheckEnableEpoch: 29, - IncrementSCRNonceInMultiTransferEnableEpoch: 30, - ESDTMultiTransferEnableEpoch: 31, - GlobalMintBurnDisableEpoch: 32, - ESDTTransferRoleEnableEpoch: 33, - BuiltInFunctionOnMetaEnableEpoch: 34, - WaitingListFixEnableEpoch: 35, - SetGuardianEnableEpoch: 36, - ScToScLogEventEnableEpoch: 37, + SCDeployEnableEpoch: 1, + BuiltInFunctionsEnableEpoch: 2, + RelayedTransactionsEnableEpoch: 3, + PenalizedTooMuchGasEnableEpoch: 4, + SwitchJailWaitingEnableEpoch: 5, + SwitchHysteresisForMinNodesEnableEpoch: 6, + BelowSignedThresholdEnableEpoch: 7, + TransactionSignedWithTxHashEnableEpoch: 8, + MetaProtectionEnableEpoch: 9, + AheadOfTimeGasUsageEnableEpoch: 10, + GasPriceModifierEnableEpoch: 11, + RepairCallbackEnableEpoch: 12, + BlockGasAndFeesReCheckEnableEpoch: 13, + StakingV2EnableEpoch: 14, + StakeEnableEpoch: 15, + DoubleKeyProtectionEnableEpoch: 16, + ESDTEnableEpoch: 17, + GovernanceEnableEpoch: 18, + DelegationManagerEnableEpoch: 19, + DelegationSmartContractEnableEpoch: 20, + CorrectLastUnjailedEnableEpoch: 21, + BalanceWaitingListsEnableEpoch: 22, + ReturnDataToLastTransferEnableEpoch: 23, + SenderInOutTransferEnableEpoch: 24, + RelayedTransactionsV2EnableEpoch: 25, + UnbondTokensV2EnableEpoch: 26, + SaveJailedAlwaysEnableEpoch: 27, + ValidatorToDelegationEnableEpoch: 28, + ReDelegateBelowMinCheckEnableEpoch: 29, + IncrementSCRNonceInMultiTransferEnableEpoch: 30, + ScheduledMiniBlocksEnableEpoch: 31, + ESDTMultiTransferEnableEpoch: 32, + GlobalMintBurnDisableEpoch: 33, + ESDTTransferRoleEnableEpoch: 34, + ComputeRewardCheckpointEnableEpoch: 35, + SCRSizeInvariantCheckEnableEpoch: 36, + BackwardCompSaveKeyValueEnableEpoch: 37, + ESDTNFTCreateOnMultiShardEnableEpoch: 38, + MetaESDTSetEnableEpoch: 39, + AddTokensToDelegationEnableEpoch: 40, + MultiESDTTransferFixOnCallBackOnEnableEpoch: 41, + OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 42, + CorrectFirstQueuedEpoch: 43, + CorrectJailedNotUnstakedEmptyQueueEpoch: 44, + FixOOGReturnCodeEnableEpoch: 45, + RemoveNonUpdatedStorageEnableEpoch: 46, + DeleteDelegatorAfterClaimRewardsEnableEpoch: 47, + OptimizeNFTStoreEnableEpoch: 48, + CreateNFTThroughExecByCallerEnableEpoch: 49, + StopDecreasingValidatorRatingWhenStuckEnableEpoch: 50, + FrontRunningProtectionEnableEpoch: 51, + IsPayableBySCEnableEpoch: 52, + CleanUpInformativeSCRsEnableEpoch: 53, + StorageAPICostOptimizationEnableEpoch: 54, + TransformToMultiShardCreateEnableEpoch: 55, + ESDTRegisterAndSetAllRolesEnableEpoch: 56, + DoNotReturnOldBlockInBlockchainHookEnableEpoch: 57, + AddFailedRelayedTxToInvalidMBsDisableEpoch: 58, + SCRSizeInvariantOnBuiltInResultEnableEpoch: 59, + CheckCorrectTokenIDForTransferRoleEnableEpoch: 60, + DisableExecByCallerEnableEpoch: 61, + FailExecutionOnEveryAPIErrorEnableEpoch: 62, + ManagedCryptoAPIsEnableEpoch: 63, + 
RefactorContextEnableEpoch: 64, + CheckFunctionArgumentEnableEpoch: 65, + CheckExecuteOnReadOnlyEnableEpoch: 66, + MiniBlockPartialExecutionEnableEpoch: 67, + ESDTMetadataContinuousCleanupEnableEpoch: 68, + FixAsyncCallBackArgsListEnableEpoch: 69, + FixOldTokenLiquidityEnableEpoch: 70, + RuntimeMemStoreLimitEnableEpoch: 71, + RuntimeCodeSizeFixEnableEpoch: 72, + SetSenderInEeiOutputTransferEnableEpoch: 73, + RefactorPeersMiniBlocksEnableEpoch: 74, + SCProcessorV2EnableEpoch: 75, + MaxBlockchainHookCountersEnableEpoch: 76, + WipeSingleNFTLiquidityDecreaseEnableEpoch: 77, + AlwaysSaveTokenMetaDataEnableEpoch: 78, + SetGuardianEnableEpoch: 79, + RelayedNonceFixEnableEpoch: 80, + DeterministicSortOnValidatorsInfoEnableEpoch: 81, + KeepExecOrderOnCreatedSCRsEnableEpoch: 82, + MultiClaimOnDelegationEnableEpoch: 83, + ChangeUsernameEnableEpoch: 84, + AutoBalanceDataTriesEnableEpoch: 85, + MigrateDataTrieEnableEpoch: 86, + ConsistentTokensValuesLengthCheckEnableEpoch: 87, + FixDelegationChangeOwnerOnAccountEnableEpoch: 88, + DynamicGasCostForDataTrieStorageLoadEnableEpoch: 89, + NFTStopCreateEnableEpoch: 90, + ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 91, + FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 92, + CurrentRandomnessOnSortingEnableEpoch: 93, + StakeLimitsEnableEpoch: 94, + StakingV4Step1EnableEpoch: 95, + StakingV4Step2EnableEpoch: 96, + StakingV4Step3EnableEpoch: 97, + CleanupAuctionOnLowWaitingListEnableEpoch: 98, + AlwaysMergeContextsInEEIEnableEpoch: 99, + DynamicESDTEnableEpoch: 100, + EGLDInMultiTransferEnableEpoch: 101, + CryptoOpcodesV2EnableEpoch: 102, + ScToScLogEventEnableEpoch: 103, + RelayedTransactionsV3EnableEpoch: 104, + FixRelayedBaseCostEnableEpoch: 105, + MultiESDTNFTTransferAndExecuteByUserEnableEpoch: 106, MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ { EpochEnable: 0, @@ -153,51 +226,120 @@ func TestInitConfigMetrics(t *testing.T) { } expectedValues := map[string]interface{}{ - "erd_smart_contract_deploy_enable_epoch": uint32(1), - "erd_built_in_functions_enable_epoch": uint32(2), - "erd_relayed_transactions_enable_epoch": uint32(3), - "erd_penalized_too_much_gas_enable_epoch": uint32(4), - "erd_switch_jail_waiting_enable_epoch": uint32(5), - "erd_switch_hysteresis_for_min_nodes_enable_epoch": uint32(6), - "erd_below_signed_threshold_enable_epoch": uint32(7), - "erd_transaction_signed_with_txhash_enable_epoch": uint32(8), - "erd_meta_protection_enable_epoch": uint32(9), - "erd_ahead_of_time_gas_usage_enable_epoch": uint32(10), - "erd_gas_price_modifier_enable_epoch": uint32(11), - "erd_repair_callback_enable_epoch": uint32(12), - "erd_block_gas_and_fee_recheck_enable_epoch": uint32(13), - "erd_staking_v2_enable_epoch": uint32(14), - "erd_stake_enable_epoch": uint32(15), - "erd_double_key_protection_enable_epoch": uint32(16), - "erd_esdt_enable_epoch": uint32(17), - "erd_governance_enable_epoch": uint32(18), - "erd_delegation_manager_enable_epoch": uint32(19), - "erd_delegation_smart_contract_enable_epoch": uint32(20), - "erd_correct_last_unjailed_enable_epoch": uint32(21), - "erd_balance_waiting_lists_enable_epoch": uint32(22), - "erd_return_data_to_last_transfer_enable_epoch": uint32(23), - "erd_sender_in_out_transfer_enable_epoch": uint32(24), - "erd_relayed_transactions_v2_enable_epoch": uint32(25), - "erd_unbond_tokens_v2_enable_epoch": uint32(26), - "erd_save_jailed_always_enable_epoch": uint32(27), - "erd_validator_to_delegation_enable_epoch": uint32(28), - "erd_redelegate_below_min_check_enable_epoch": uint32(29), - 
"erd_increment_scr_nonce_in_multi_transfer_enable_epoch": uint32(30), - "erd_esdt_multi_transfer_enable_epoch": uint32(31), - "erd_global_mint_burn_disable_epoch": uint32(32), - "erd_esdt_transfer_role_enable_epoch": uint32(33), - "erd_builtin_function_on_meta_enable_epoch": uint32(34), - "erd_waiting_list_fix_enable_epoch": uint32(35), - "erd_max_nodes_change_enable_epoch": nil, - "erd_total_supply": "12345", - "erd_hysteresis": "0.100000", - "erd_adaptivity": "true", - "erd_max_nodes_change_enable_epoch0_epoch_enable": uint32(0), - "erd_max_nodes_change_enable_epoch0_max_num_nodes": uint32(1), - "erd_max_nodes_change_enable_epoch0_nodes_to_shuffle_per_shard": uint32(2), - "erd_set_guardian_feature_enable_epoch": uint32(36), - "erd_set_sc_to_sc_log_event_enable_epoch": uint32(37), - common.MetricGatewayMetricsEndpoint: "http://localhost:8080", + "erd_smart_contract_deploy_enable_epoch": uint32(1), + "erd_built_in_functions_enable_epoch": uint32(2), + "erd_relayed_transactions_enable_epoch": uint32(3), + "erd_penalized_too_much_gas_enable_epoch": uint32(4), + "erd_switch_jail_waiting_enable_epoch": uint32(5), + "erd_switch_hysteresis_for_min_nodes_enable_epoch": uint32(6), + "erd_below_signed_threshold_enable_epoch": uint32(7), + "erd_transaction_signed_with_txhash_enable_epoch": uint32(8), + "erd_meta_protection_enable_epoch": uint32(9), + "erd_ahead_of_time_gas_usage_enable_epoch": uint32(10), + "erd_gas_price_modifier_enable_epoch": uint32(11), + "erd_repair_callback_enable_epoch": uint32(12), + "erd_block_gas_and_fee_recheck_enable_epoch": uint32(13), + "erd_staking_v2_enable_epoch": uint32(14), + "erd_stake_enable_epoch": uint32(15), + "erd_double_key_protection_enable_epoch": uint32(16), + "erd_esdt_enable_epoch": uint32(17), + "erd_governance_enable_epoch": uint32(18), + "erd_delegation_manager_enable_epoch": uint32(19), + "erd_delegation_smart_contract_enable_epoch": uint32(20), + "erd_correct_last_unjailed_enable_epoch": uint32(21), + "erd_balance_waiting_lists_enable_epoch": uint32(22), + "erd_return_data_to_last_transfer_enable_epoch": uint32(23), + "erd_sender_in_out_transfer_enable_epoch": uint32(24), + "erd_relayed_transactions_v2_enable_epoch": uint32(25), + "erd_unbond_tokens_v2_enable_epoch": uint32(26), + "erd_save_jailed_always_enable_epoch": uint32(27), + "erd_validator_to_delegation_enable_epoch": uint32(28), + "erd_redelegate_below_min_check_enable_epoch": uint32(29), + "erd_increment_scr_nonce_in_multi_transfer_enable_epoch": uint32(30), + "erd_scheduled_miniblocks_enable_epoch": uint32(31), + "erd_esdt_multi_transfer_enable_epoch": uint32(32), + "erd_global_mint_burn_disable_epoch": uint32(33), + "erd_compute_reward_checkpoint_enable_epoch": uint32(35), + "erd_esdt_transfer_role_enable_epoch": uint32(34), + "erd_scr_size_invariant_check_enable_epoch": uint32(36), + "erd_backward_comp_save_keyvalue_enable_epoch": uint32(37), + "erd_esdt_nft_create_on_multi_shard_enable_epoch": uint32(38), + "erd_meta_esdt_set_enable_epoch": uint32(39), + "erd_add_tokens_to_delegation_enable_epoch": uint32(40), + "erd_multi_esdt_transfer_fix_on_callback_enable_epoch": uint32(41), + "erd_optimize_gas_used_in_cross_miniblocks_enable_epoch": uint32(42), + "erd_correct_first_queued_enable_epoch": uint32(43), + "erd_correct_jailed_not_unstaked_empty_queue_enable_epoch": uint32(44), + "erd_fix_oog_return_code_enable_epoch": uint32(45), + "erd_remove_non_updated_storage_enable_epoch": uint32(46), + "erd_delete_delegator_after_claim_rewards_enable_epoch": uint32(47), + 
"erd_optimize_nft_store_enable_epoch": uint32(48), + "erd_create_nft_through_exec_by_caller_enable_epoch": uint32(49), + "erd_stop_decreasing_validator_rating_when_stuck_enable_epoch": uint32(50), + "erd_front_running_protection_enable_epoch": uint32(51), + "erd_is_payable_by_sc_enable_epoch": uint32(52), + "erd_cleanup_informative_scrs_enable_epoch": uint32(53), + "erd_storage_api_cost_optimization_enable_epoch": uint32(54), + "erd_transform_to_multi_shard_create_enable_epoch": uint32(55), + "erd_esdt_register_and_set_all_roles_enable_epoch": uint32(56), + "erd_do_not_returns_old_block_in_blockchain_hook_enable_epoch": uint32(57), + "erd_add_failed_relayed_tx_to_invalid_mbs_enable_epoch": uint32(58), + "erd_scr_size_invariant_on_builtin_result_enable_epoch": uint32(59), + "erd_check_correct_tokenid_for_transfer_role_enable_epoch": uint32(60), + "erd_disable_exec_by_caller_enable_epoch": uint32(61), + "erd_fail_execution_on_every_api_error_enable_epoch": uint32(62), + "erd_managed_crypto_apis_enable_epoch": uint32(63), + "erd_refactor_context_enable_epoch": uint32(64), + "erd_check_function_argument_enable_epoch": uint32(65), + "erd_check_execute_on_readonly_enable_epoch": uint32(66), + "erd_miniblock_partial_execution_enable_epoch": uint32(67), + "erd_esdt_metadata_continuous_cleanup_enable_epoch": uint32(68), + "erd_fix_async_callback_args_list_enable_epoch": uint32(69), + "erd_fix_old_token_liquidity_enable_epoch": uint32(70), + "erd_runtime_mem_store_limit_enable_epoch": uint32(71), + "erd_runtime_code_size_fix_enable_epoch": uint32(72), + "erd_set_sender_in_eei_output_transfer_enable_epoch": uint32(73), + "erd_refactor_peers_miniblocks_enable_epoch": uint32(74), + "erd_sc_processorv2_enable_epoch": uint32(75), + "erd_max_blockchain_hook_counters_enable_epoch": uint32(76), + "erd_wipe_single_nft_liquidity_decrease_enable_epoch": uint32(77), + "erd_always_save_token_metadata_enable_epoch": uint32(78), + "erd_set_guardian_feature_enable_epoch": uint32(79), + "erd_relayed_nonce_fix_enable_epoch": uint32(80), + "erd_deterministic_sort_on_validators_info_enable_epoch": uint32(81), + "erd_keep_exec_order_on_created_scrs_enable_epoch": uint32(82), + "erd_multi_claim_on_delegation_enable_epoch": uint32(83), + "erd_change_username_enable_epoch": uint32(84), + "erd_auto_balance_data_tries_enable_epoch": uint32(85), + "erd_migrate_datatrie_enable_epoch": uint32(86), + "erd_consistent_tokens_values_length_check_enable_epoch": uint32(87), + "erd_fix_delegation_change_owner_on_account_enable_epoch": uint32(88), + "erd_dynamic_gas_cost_for_datatrie_storage_load_enable_epoch": uint32(89), + "erd_nft_stop_create_enable_epoch": uint32(90), + "erd_change_owner_address_cross_shard_through_sc_enable_epoch": uint32(91), + "erd_fix_gas_remainig_for_save_keyvalue_builtin_function_enable_epoch": uint32(92), + "erd_current_randomness_on_sorting_enable_epoch": uint32(93), + "erd_stake_limits_enable_epoch": uint32(94), + "erd_staking_v4_step1_enable_epoch": uint32(95), + "erd_staking_v4_step2_enable_epoch": uint32(96), + "erd_staking_v4_step3_enable_epoch": uint32(97), + "erd_cleanup_auction_on_low_waiting_list_enable_epoch": uint32(98), + "erd_always_merge_contexts_in_eei_enable_epoch": uint32(99), + "erd_dynamic_esdt_enable_epoch": uint32(100), + "erd_egld_in_multi_transfer_enable_epoch": uint32(101), + "erd_crypto_opcodes_v2_enable_epoch": uint32(102), + "erd_set_sc_to_sc_log_event_enable_epoch": uint32(103), + "erd_relayed_transactions_v3_enable_epoch": uint32(104), + "erd_fix_relayed_base_cost_enable_epoch": 
uint32(105), + "erd_multi_esdt_transfer_execute_by_user_enable_epoch": uint32(106), + "erd_max_nodes_change_enable_epoch": nil, + "erd_total_supply": "12345", + "erd_hysteresis": "0.100000", + "erd_adaptivity": "true", + "erd_max_nodes_change_enable_epoch0_epoch_enable": uint32(0), + "erd_max_nodes_change_enable_epoch0_max_num_nodes": uint32(1), + "erd_max_nodes_change_enable_epoch0_nodes_to_shuffle_per_shard": uint32(2), + common.MetricGatewayMetricsEndpoint: "http://localhost:8080", } economicsConfig := config.EconomicsConfig{ @@ -206,7 +348,7 @@ func TestInitConfigMetrics(t *testing.T) { }, } - genesisNodesConfig := &testscommon.NodesSetupStub{ + genesisNodesConfig := &genesisMocks.NodesSetupStub{ GetAdaptivityCalled: func() bool { return true }, @@ -237,7 +379,7 @@ func TestInitConfigMetrics(t *testing.T) { assert.Equal(t, v, keys[k]) } - genesisNodesConfig = &testscommon.NodesSetupStub{ + genesisNodesConfig = &genesisMocks.NodesSetupStub{ GetAdaptivityCalled: func() bool { return false }, @@ -363,7 +505,7 @@ func TestInitMetrics(t *testing.T) { return 0 }, } - nodesSetup := &testscommon.NodesSetupStub{ + nodesSetup := &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return 63 }, diff --git a/node/mock/peerProcessorMock.go b/node/mock/peerProcessorMock.go deleted file mode 100644 index 7ae112df225..00000000000 --- a/node/mock/peerProcessorMock.go +++ /dev/null @@ -1,133 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorMock - -type ValidatorStatisticsProcessorMock struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - IsInterfaceNilCalled func() bool - - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorMock) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorMock) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorMock) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorMock) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if 
vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorMock) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorMock) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - - return nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorMock) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorMock) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorMock) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorMock) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorMock) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorMock) DisplayRatings(_ uint32) { -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorMock) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorMock) LastFinalizedRootHash() []byte { - return nil -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorMock) IsInterfaceNil() bool { - return false -} diff --git a/node/mock/validatorStatisticsProcessorStub.go b/node/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 2233bc84f03..00000000000 --- a/node/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,130 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - PeerAccountToValidatorInfoCalled func(peerAccount 
state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - return nil -} - -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false -} diff --git a/node/node.go b/node/node.go index 001bbd23f30..7331b9d44ca 100644 --- a/node/node.go +++ b/node/node.go @@ -54,14 +54,19 @@ 
var log = logger.GetOrCreate("node") var _ facade.NodeHandler = (*Node)(nil) // Option represents a functional configuration parameter that can operate -// -// over the None struct. +// over the None struct. type Option func(*Node) error type filter interface { filter(tokenIdentifier string, esdtData *systemSmartContracts.ESDTDataV2) bool } +type accountInfo struct { + account state.UserAccountHandler + block api.BlockInfo + accountResponse api.AccountResponse +} + // Node is a structure that holds all managed components type Node struct { initialNodesPubkeys map[uint32][]string @@ -288,16 +293,29 @@ func (n *Node) GetKeyValuePairs(address string, options api.AccountQueryOptions, } if check.IfNil(userAccount.DataTrie()) { - return map[string]string{}, api.BlockInfo{}, nil + return map[string]string{}, blockInfo, nil } + mapToReturn, err := n.getKeys(userAccount, ctx) + if err != nil { + return nil, api.BlockInfo{}, err + } + + if common.IsContextDone(ctx) { + return nil, api.BlockInfo{}, ErrTrieOperationsTimeout + } + + return mapToReturn, blockInfo, nil +} + +func (n *Node) getKeys(userAccount state.UserAccountHandler, ctx context.Context) (map[string]string, error) { chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), ErrChan: errChan.NewErrChanWrapper(), } - err = userAccount.GetAllLeaves(chLeaves, ctx) + err := userAccount.GetAllLeaves(chLeaves, ctx) if err != nil { - return nil, api.BlockInfo{}, err + return nil, err } mapToReturn := make(map[string]string) @@ -307,14 +325,9 @@ func (n *Node) GetKeyValuePairs(address string, options api.AccountQueryOptions, err = chLeaves.ErrChan.ReadFromChanNonBlocking() if err != nil { - return nil, api.BlockInfo{}, err - } - - if common.IsContextDone(ctx) { - return nil, api.BlockInfo{}, ErrTrieOperationsTimeout + return nil, err } - - return mapToReturn, blockInfo, nil + return mapToReturn, nil } // GetValueForKey will return the value for a key from a given account @@ -785,6 +798,8 @@ func (n *Node) commonTransactionValidation( enableSignWithTxHash, n.coreComponents.TxSignHasher(), n.coreComponents.TxVersionChecker(), + n.coreComponents.EnableEpochsHandler(), + n.processComponents.RelayedTxV3Processor(), ) if err != nil { return nil, nil, err @@ -878,19 +893,20 @@ func (n *Node) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*trans } tx := &transaction.Transaction{ - Nonce: txArgs.Nonce, - Value: valAsBigInt, - RcvAddr: receiverAddress, - RcvUserName: txArgs.ReceiverUsername, - SndAddr: senderAddress, - SndUserName: txArgs.SenderUsername, - GasPrice: txArgs.GasPrice, - GasLimit: txArgs.GasLimit, - Data: txArgs.DataField, - Signature: signatureBytes, - ChainID: []byte(txArgs.ChainID), - Version: txArgs.Version, - Options: txArgs.Options, + Nonce: txArgs.Nonce, + Value: valAsBigInt, + RcvAddr: receiverAddress, + RcvUserName: txArgs.ReceiverUsername, + SndAddr: senderAddress, + SndUserName: txArgs.SenderUsername, + GasPrice: txArgs.GasPrice, + GasLimit: txArgs.GasLimit, + Data: txArgs.DataField, + Signature: signatureBytes, + ChainID: []byte(txArgs.ChainID), + Version: txArgs.Version, + Options: txArgs.Options, + InnerTransactions: txArgs.InnerTransactions, } if len(txArgs.Guardian) > 0 { @@ -900,6 +916,13 @@ func (n *Node) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*trans } } + if len(txArgs.Relayer) > 0 { + tx.RelayerAddr, err = addrPubKeyConverter.Decode(txArgs.Relayer) + if err != nil { + return nil, nil, errors.New("could not create relayer 
address from provided param") + } + } + var txHash []byte txHash, err = core.CalculateHash(n.coreComponents.InternalMarshalizer(), n.coreComponents.Hasher(), tx) if err != nil { @@ -931,40 +954,36 @@ func (n *Node) setTxGuardianData(guardian string, guardianSigHex string, tx *tra // GetAccount will return account details for a given address func (n *Node) GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - account, blockInfo, err := n.loadUserAccountHandlerByAddress(address, options) + accInfo, err := n.getAccountInfo(address, options) if err != nil { - adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err) - if isEmptyAccount { - return api.AccountResponse{ - Address: address, - Balance: "0", - DeveloperReward: "0", - }, adaptedBlockInfo, nil - } + return api.AccountResponse{}, api.BlockInfo{}, err + } + return accInfo.accountResponse, accInfo.block, nil +} + +// GetAccountWithKeys will return account details for a given address including the keys +func (n *Node) GetAccountWithKeys(address string, options api.AccountQueryOptions, ctx context.Context) (api.AccountResponse, api.BlockInfo, error) { + accInfo, err := n.getAccountInfo(address, options) + if err != nil { return api.AccountResponse{}, api.BlockInfo{}, err } - ownerAddress := "" - if len(account.GetOwnerAddress()) > 0 { - addressPubkeyConverter := n.coreComponents.AddressPubKeyConverter() - ownerAddress, err = addressPubkeyConverter.Encode(account.GetOwnerAddress()) + var keys map[string]string + if options.WithKeys { + if accInfo.account == nil || accInfo.account.DataTrie() == nil { + return accInfo.accountResponse, accInfo.block, nil + } + + keys, err = n.getKeys(accInfo.account, ctx) if err != nil { return api.AccountResponse{}, api.BlockInfo{}, err } } - return api.AccountResponse{ - Address: address, - Nonce: account.GetNonce(), - Balance: account.GetBalance().String(), - Username: string(account.GetUserName()), - CodeHash: account.GetCodeHash(), - RootHash: account.GetRootHash(), - CodeMetadata: account.GetCodeMetadata(), - DeveloperReward: account.GetDeveloperReward().String(), - OwnerAddress: ownerAddress, - }, blockInfo, nil + accInfo.accountResponse.Pairs = keys + + return accInfo.accountResponse, accInfo.block, nil } func extractBlockInfoIfNewAccount(err error) (api.BlockInfo, bool) { @@ -984,6 +1003,58 @@ func extractBlockInfoIfNewAccount(err error) (api.BlockInfo, bool) { return api.BlockInfo{}, false } +func (n *Node) getAccountInfo(address string, options api.AccountQueryOptions) (accountInfo, error) { + account, blockInfo, err := n.loadUserAccountHandlerByAddress(address, options) + if err != nil { + adaptedBlockInfo, isEmptyAccount := extractBlockInfoIfNewAccount(err) + if isEmptyAccount { + return accountInfo{ + accountResponse: api.AccountResponse{ + Address: address, + Balance: "0", + DeveloperReward: "0", + }, + block: adaptedBlockInfo, + account: account, + }, nil + } + return accountInfo{ + accountResponse: api.AccountResponse{}, + block: api.BlockInfo{}, + account: nil, + }, err + } + + ownerAddress := "" + if len(account.GetOwnerAddress()) > 0 { + addressPubkeyConverter := n.coreComponents.AddressPubKeyConverter() + ownerAddress, err = addressPubkeyConverter.Encode(account.GetOwnerAddress()) + if err != nil { + return accountInfo{ + accountResponse: api.AccountResponse{}, + block: api.BlockInfo{}, + account: nil, + }, err + } + } + + return accountInfo{ + accountResponse: api.AccountResponse{ + Address: address, + Nonce: 
account.GetNonce(), + Balance: account.GetBalance().String(), + Username: string(account.GetUserName()), + CodeHash: account.GetCodeHash(), + RootHash: account.GetRootHash(), + CodeMetadata: account.GetCodeMetadata(), + DeveloperReward: account.GetDeveloperReward().String(), + OwnerAddress: ownerAddress, + }, + block: blockInfo, + account: account, + }, nil +} + // GetCode returns the code for the given code hash func (n *Node) GetCode(codeHash []byte, options api.AccountQueryOptions) ([]byte, api.BlockInfo) { return n.loadAccountCode(codeHash, options) @@ -1008,6 +1079,11 @@ func (n *Node) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatisti return n.processComponents.ValidatorsProvider().GetLatestValidators(), nil } +// AuctionListApi will return the auction list config along with qualified nodes +func (n *Node) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + return n.processComponents.ValidatorsProvider().GetAuctionList() +} + // DirectTrigger will start the hardfork trigger func (n *Node) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error { return n.processComponents.HardforkTrigger().Trigger(epoch, withEarlyEndOfEpoch) diff --git a/node/nodeRunner.go b/node/nodeRunner.go index a4af2192f2d..591321774cb 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -169,12 +169,10 @@ func printEnableEpochs(configs *config.Configs) { log.Debug(readEpochFor("save jailed always"), "epoch", enableEpochs.SaveJailedAlwaysEnableEpoch) log.Debug(readEpochFor("validator to delegation"), "epoch", enableEpochs.ValidatorToDelegationEnableEpoch) log.Debug(readEpochFor("re-delegate below minimum check"), "epoch", enableEpochs.ReDelegateBelowMinCheckEnableEpoch) - log.Debug(readEpochFor("waiting waiting list"), "epoch", enableEpochs.WaitingListFixEnableEpoch) log.Debug(readEpochFor("increment SCR nonce in multi transfer"), "epoch", enableEpochs.IncrementSCRNonceInMultiTransferEnableEpoch) log.Debug(readEpochFor("esdt and NFT multi transfer"), "epoch", enableEpochs.ESDTMultiTransferEnableEpoch) log.Debug(readEpochFor("contract global mint and burn"), "epoch", enableEpochs.GlobalMintBurnDisableEpoch) log.Debug(readEpochFor("contract transfer role"), "epoch", enableEpochs.ESDTTransferRoleEnableEpoch) - log.Debug(readEpochFor("built in functions on metachain"), "epoch", enableEpochs.BuiltInFunctionOnMetaEnableEpoch) log.Debug(readEpochFor("compute rewards checkpoint on delegation"), "epoch", enableEpochs.ComputeRewardCheckpointEnableEpoch) log.Debug(readEpochFor("esdt NFT create on multiple shards"), "epoch", enableEpochs.ESDTNFTCreateOnMultiShardEnableEpoch) log.Debug(readEpochFor("SCR size invariant check"), "epoch", enableEpochs.SCRSizeInvariantCheckEnableEpoch) @@ -210,6 +208,11 @@ func printEnableEpochs(configs *config.Configs) { log.Debug(readEpochFor("refactor peers mini blocks"), "epoch", enableEpochs.RefactorPeersMiniBlocksEnableEpoch) log.Debug(readEpochFor("runtime memstore limit"), "epoch", enableEpochs.RuntimeMemStoreLimitEnableEpoch) log.Debug(readEpochFor("max blockchainhook counters"), "epoch", enableEpochs.MaxBlockchainHookCountersEnableEpoch) + log.Debug(readEpochFor("limit validators"), "epoch", enableEpochs.StakeLimitsEnableEpoch) + log.Debug(readEpochFor("staking v4 step 1"), "epoch", enableEpochs.StakingV4Step1EnableEpoch) + log.Debug(readEpochFor("staking v4 step 2"), "epoch", enableEpochs.StakingV4Step2EnableEpoch) + log.Debug(readEpochFor("staking v4 step 3"), "epoch", enableEpochs.StakingV4Step3EnableEpoch) + gasSchedule := 
configs.EpochConfig.GasSchedule log.Debug(readEpochFor("gas schedule directories paths"), "epoch", gasSchedule.GasScheduleByEpochs) @@ -271,6 +274,10 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( chanStopNodeProcess chan endProcess.ArgEndProcess, ) (bool, error) { goRoutinesNumberStart := runtime.NumGoroutine() + + log.Debug("applying custom configs based on the current architecture") + ApplyArchCustomConfigs(nr.configs) + configs := nr.configs flagsConfig := configs.FlagsConfig configurationPaths := configs.ConfigurationPathsHolder @@ -286,6 +293,11 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( return true, err } + err = config.SanityCheckNodesConfig(managedCoreComponents.GenesisNodesSetup(), configs.EpochConfig.EnableEpochs) + if err != nil { + return true, err + } + log.Debug("creating status core components") managedStatusCoreComponents, err := nr.CreateManagedStatusCoreComponents(managedCoreComponents) if err != nil { @@ -377,6 +389,7 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) if err != nil { return true, err @@ -432,7 +445,6 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( managedStateComponents, managedBootstrapComponents, managedProcessComponents, - managedStatusCoreComponents, ) if err != nil { return true, err @@ -561,7 +573,6 @@ func addSyncersToAccountsDB( stateComponents mainFactory.StateComponentsHolder, bootstrapComponents mainFactory.BootstrapComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, - statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) error { selfId := bootstrapComponents.ShardCoordinator().SelfId() if selfId == core.MetachainShardId { @@ -571,7 +582,6 @@ func addSyncersToAccountsDB( dataComponents, stateComponents, processComponents, - statusCoreComponents, ) if err != nil { return err @@ -595,7 +605,6 @@ func addSyncersToAccountsDB( stateComponents, bootstrapComponents, processComponents, - statusCoreComponents, ) if err != nil { return err @@ -615,7 +624,6 @@ func getUserAccountSyncer( stateComponents mainFactory.StateComponentsHolder, bootstrapComponents mainFactory.BootstrapComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, - statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) (process.AccountsDBSyncer, error) { maxTrieLevelInMemory := config.StateTriesConfig.MaxStateTrieLevelInMemory userTrie := stateComponents.TriesContainer().Get([]byte(dataRetriever.UserAccountsUnit.String())) @@ -633,7 +641,6 @@ func getUserAccountSyncer( dataComponents, processComponents, storageManager, - statusCoreComponents, maxTrieLevelInMemory, ), ShardId: bootstrapComponents.ShardCoordinator().SelfId(), @@ -650,7 +657,6 @@ func getValidatorAccountSyncer( dataComponents mainFactory.DataComponentsHolder, stateComponents mainFactory.StateComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, - statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) (process.AccountsDBSyncer, error) { maxTrieLevelInMemory := config.StateTriesConfig.MaxPeerTrieLevelInMemory peerTrie := stateComponents.TriesContainer().Get([]byte(dataRetriever.PeerAccountsUnit.String())) @@ -663,7 +669,6 @@ func getValidatorAccountSyncer( dataComponents, processComponents, storageManager, - statusCoreComponents, maxTrieLevelInMemory, ), } @@ -677,7 +682,6 @@ func 
getBaseAccountSyncerArgs( dataComponents mainFactory.DataComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, storageManager common.StorageManager, - statusCoreComponents mainFactory.StatusCoreComponentsHolder, maxTrieLevelInMemory uint, ) syncer.ArgsNewBaseAccountsSyncer { return syncer.ArgsNewBaseAccountsSyncer{ @@ -827,6 +831,7 @@ func (nr *nodeRunner) createMetrics( metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricNodeDisplayName, nr.configs.PreferencesConfig.Preferences.NodeDisplayName) metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricRedundancyLevel, fmt.Sprintf("%d", nr.configs.PreferencesConfig.Preferences.RedundancyLevel)) metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricRedundancyIsMainActive, common.MetricValueNA) + metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricRedundancyStepInReason, "") metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricChainId, coreComponents.ChainID()) metrics.SaveUint64Metric(statusCoreComponents.AppStatusHandler(), common.MetricGasPerDataByte, coreComponents.EconomicsData().GasPerDataByte()) metrics.SaveUint64Metric(statusCoreComponents.AppStatusHandler(), common.MetricMinGasPrice, coreComponents.EconomicsData().MinGasPrice()) @@ -1027,7 +1032,7 @@ func (nr *nodeRunner) logInformation( log.Info("Bootstrap", "epoch", bootstrapComponents.EpochBootstrapParams().Epoch()) if bootstrapComponents.EpochBootstrapParams().NodesConfig() != nil { log.Info("the epoch from nodesConfig is", - "epoch", bootstrapComponents.EpochBootstrapParams().NodesConfig().CurrentEpoch) + "epoch", bootstrapComponents.EpochBootstrapParams().NodesConfig().GetCurrentEpoch()) } var shardIdString = core.GetShardIDString(bootstrapComponents.ShardCoordinator().SelfId()) @@ -1235,8 +1240,10 @@ func (nr *nodeRunner) CreateManagedProcessComponents( processArgs := processComp.ProcessComponentsFactoryArgs{ Config: *configs.GeneralConfig, EpochConfig: *configs.EpochConfig, + RoundConfig: *configs.RoundConfig, PrefConfigs: *configs.PreferencesConfig, ImportDBConfig: *configs.ImportDbConfig, + EconomicsConfig: *configs.EconomicsConfig, AccountsParser: accountsParser, SmartContractParser: smartContractParser, GasSchedule: gasScheduleNotifier, diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go index 6e3c61a12cd..5d0e9a7666c 100644 --- a/node/nodeRunner_test.go +++ b/node/nodeRunner_test.go @@ -1,5 +1,3 @@ -//go:build !race - package node import ( @@ -22,7 +20,9 @@ import ( const originalConfigsPath = "../cmd/node/config" func TestNewNodeRunner(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } t.Run("nil configs should error", func(t *testing.T) { t.Parallel() @@ -35,7 +35,9 @@ func TestNewNodeRunner(t *testing.T) { t.Run("with valid configs should work", func(t *testing.T) { t.Parallel() - configs := testscommon.CreateTestConfigs(t, originalConfigsPath) + configs, err := testscommon.CreateTestConfigs(t.TempDir(), originalConfigsPath) + require.Nil(t, err) + runner, err := NewNodeRunner(configs) assert.NotNil(t, runner) assert.Nil(t, err) @@ -43,13 +45,17 @@ func TestNewNodeRunner(t *testing.T) { } func TestNodeRunner_StartAndCloseNodeUsingSIGINT(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } + + configs, err := testscommon.CreateTestConfigs(t.TempDir(), originalConfigsPath) + require.Nil(t, err) - configs := 
testscommon.CreateTestConfigs(t, originalConfigsPath) runner, _ := NewNodeRunner(configs) trigger := mock.NewApplicationRunningTrigger() - err := logger.AddLogObserver(trigger, &logger.PlainFormatter{}) + err = logger.AddLogObserver(trigger, &logger.PlainFormatter{}) require.Nil(t, err) // start a go routine that will send the SIGINT message after 1 second after the node has started @@ -72,7 +78,9 @@ func TestNodeRunner_StartAndCloseNodeUsingSIGINT(t *testing.T) { } func TestCopyDirectory(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } file1Name := "file1.toml" file1Contents := []byte("file1") @@ -130,7 +138,9 @@ func TestCopyDirectory(t *testing.T) { } func TestWaitForSignal(t *testing.T) { - t.Parallel() + if testing.Short() { + t.Skip("this is not a short test") + } closedCalled := make(map[string]struct{}) healthServiceClosableComponent := &mock.CloserStub{ diff --git a/node/nodeTesting.go b/node/nodeTesting.go index 29683432508..bcd15052e21 100644 --- a/node/nodeTesting.go +++ b/node/nodeTesting.go @@ -264,7 +264,7 @@ func (n *Node) generateAndSignTxBuffArray( return tx, signedMarshalizedTx, nil } -//GenerateTransaction generates a new transaction with sender, receiver, amount and code +// GenerateTransaction generates a new transaction with sender, receiver, amount and code func (n *Node) GenerateTransaction(senderHex string, receiverHex string, value *big.Int, transactionData string, privateKey crypto.PrivateKey, chainID []byte, minTxVersion uint32) (*transaction.Transaction, error) { if check.IfNil(n.coreComponents.AddressPubKeyConverter()) { return nil, ErrNilPubkeyConverter diff --git a/node/node_test.go b/node/node_test.go index 152cf98bdd7..b584dc2370b 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -56,11 +56,14 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryTests "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" mockStorage "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -113,7 +116,7 @@ func getAccAdapter(balance *big.Int) *stateMock.AccountsStub { return acc, nil } - accDB.RecreateTrieCalled = func(_ []byte) error { + accDB.RecreateTrieCalled = func(_ common.RootHashHolder) error { return nil } @@ -237,7 +240,7 @@ func TestNode_GetBalanceAccNotFoundShouldReturnEmpty(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) }, - RecreateTrieCalled: func(_ 
[]byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -339,7 +342,7 @@ func TestNode_GetCodeHashAccNotFoundShouldReturnEmpty(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -410,7 +413,7 @@ func TestNode_GetKeyValuePairsAccNotFoundShouldReturnEmpty(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -475,7 +478,7 @@ func TestNode_GetKeyValuePairs(t *testing.T) { accDB.GetAccountWithBlockInfoCalled = func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return acc, nil, nil } - accDB.RecreateTrieCalled = func(rootHash []byte) error { + accDB.RecreateTrieCalled = func(rootHash common.RootHashHolder) error { return nil } @@ -535,7 +538,7 @@ func TestNode_GetKeyValuePairs_GetAllLeavesShouldFail(t *testing.T) { accDB.GetAccountWithBlockInfoCalled = func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return acc, nil, nil } - accDB.RecreateTrieCalled = func(rootHash []byte) error { + accDB.RecreateTrieCalled = func(rootHash common.RootHashHolder) error { return nil } @@ -589,7 +592,7 @@ func TestNode_GetKeyValuePairsContextShouldTimeout(t *testing.T) { accDB.GetAccountWithBlockInfoCalled = func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return acc, nil, nil } - accDB.RecreateTrieCalled = func(rootHash []byte) error { + accDB.RecreateTrieCalled = func(rootHash common.RootHashHolder) error { return nil } @@ -628,7 +631,7 @@ func TestNode_GetValueForKeyAccNotFoundShouldReturnEmpty(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -671,7 +674,7 @@ func TestNode_GetValueForKey(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return acc, nil, nil }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -710,7 +713,7 @@ func TestNode_GetESDTDataAccNotFoundShouldReturnEmpty(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -753,7 +756,7 @@ func TestNode_GetESDTData(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, 
error) { return acc, nil, nil }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -808,7 +811,7 @@ func TestNode_GetESDTDataForNFT(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return acc, nil, nil }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -868,7 +871,7 @@ func TestNode_GetAllESDTTokens(t *testing.T) { }) accDB := &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -925,7 +928,7 @@ func TestNode_GetAllESDTTokens_GetAllLeavesShouldFail(t *testing.T) { }) accDB := &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -981,7 +984,7 @@ func TestNode_GetAllESDTTokensContextShouldTimeout(t *testing.T) { }) accDB := &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -1024,7 +1027,7 @@ func TestNode_GetAllESDTsAccNotFoundShouldReturnEmpty(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -1115,7 +1118,7 @@ func TestNode_GetAllESDTTokensShouldReturnEsdtAndFormattedNft(t *testing.T) { }) accDB := &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -1199,7 +1202,7 @@ func TestNode_GetAllIssuedESDTs(t *testing.T) { }) accDB := &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -1285,7 +1288,7 @@ func TestNode_GetESDTsWithRole(t *testing.T) { }) accDB := &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -1364,7 +1367,7 @@ func TestNode_GetESDTsRoles(t *testing.T) { }) accDB := &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -1430,7 +1433,7 @@ func TestNode_GetNFTTokenIDsRegisteredByAddress(t *testing.T) { ) accDB := &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -1487,7 +1490,7 @@ func TestNode_GetNFTTokenIDsRegisteredByAddressContextShouldTimeout(t *testing.T ) accDB := &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -1848,21 +1851,22 @@ func TestGenerateTransaction_CorrectParamsShouldNotError(t *testing.T) { func getDefaultTransactionArgs() *external.ArgsCreateTransaction { return &external.ArgsCreateTransaction{ - Nonce: uint64(0), - Value: new(big.Int).SetInt64(10).String(), - Receiver: "rcv", - ReceiverUsername: []byte("rcvrUsername"), 
- Sender: "snd", - SenderUsername: []byte("sndrUsername"), - GasPrice: uint64(10), - GasLimit: uint64(20), - DataField: []byte("-"), - SignatureHex: hex.EncodeToString(bytes.Repeat([]byte{0}, 10)), - ChainID: "chainID", - Version: 1, - Options: 0, - Guardian: "", - GuardianSigHex: "", + Nonce: uint64(0), + Value: new(big.Int).SetInt64(10).String(), + Receiver: "rcv", + ReceiverUsername: []byte("rcvrUsername"), + Sender: "snd", + SenderUsername: []byte("sndrUsername"), + GasPrice: uint64(10), + GasLimit: uint64(20), + DataField: []byte("-"), + SignatureHex: hex.EncodeToString(bytes.Repeat([]byte{0}, 10)), + ChainID: "chainID", + Version: 1, + Options: 0, + Guardian: "", + GuardianSigHex: "", + InnerTransactions: nil, } } @@ -3203,12 +3207,11 @@ func TestNode_ValidatorStatisticsApi(t *testing.T) { initialPubKeys[1] = keys[1] initialPubKeys[2] = keys[2] - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) + validatorsInfo := state.NewShardValidatorsInfoMap() for shardId, pubkeysPerShard := range initialPubKeys { - validatorsInfo[shardId] = make([]*state.ValidatorInfo, 0) for _, pubKey := range pubkeysPerShard { - validatorsInfo[shardId] = append(validatorsInfo[shardId], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte(pubKey), ShardId: shardId, List: "", @@ -3230,26 +3233,25 @@ func TestNode_ValidatorStatisticsApi(t *testing.T) { } } - vsp := &mock.ValidatorStatisticsProcessorStub{ + vsp := &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() (i []byte, err error) { return []byte("hash"), nil }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m map[uint32][]*state.ValidatorInfo, err error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m state.ShardValidatorsInfoMapHandler, err error) { return validatorsInfo, nil }, } - validatorProvider := &mock.ValidatorsProviderStub{GetLatestValidatorsCalled: func() map[string]*validator.ValidatorStatistics { - apiResponses := make(map[string]*validator.ValidatorStatistics) + validatorProvider := &stakingcommon.ValidatorsProviderStub{ + GetLatestValidatorsCalled: func() map[string]*validator.ValidatorStatistics { + apiResponses := make(map[string]*validator.ValidatorStatistics) - for _, vis := range validatorsInfo { - for _, vi := range vis { + for _, vi := range validatorsInfo.GetAllValidatorsInfo() { apiResponses[hex.EncodeToString(vi.GetPublicKey())] = &validator.ValidatorStatistics{} } - } - return apiResponses - }, + return apiResponses + }, } processComponents := getDefaultProcessComponents() @@ -3369,7 +3371,7 @@ func TestNode_GetAccountAccNotFoundShouldReturnEmpty(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return nil, nil, state.NewErrAccountNotFoundAtBlock(dummyBlockInfo.forProcessing()) }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -3416,7 +3418,7 @@ func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return accnt, nil, nil }, - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -3447,6 +3449,161 @@ func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { require.Equal(t, testscommon.TestAddressAlice, recovAccnt.OwnerAddress) } +func 
TestNode_GetAccountAccountWithKeysErrorShouldFail(t *testing.T) { + accnt := createAcc(testscommon.TestPubKeyBob) + _ = accnt.AddToBalance(big.NewInt(1)) + expectedErr := errors.New("expected error") + accnt.SetDataTrie( + &trieMock.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, _ common.KeyBuilder, tlp common.TrieLeafParser) error { + return expectedErr + }, + RootCalled: func() ([]byte, error) { + return nil, nil + }, + }) + + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return accnt, nil, nil + }, + RecreateTrieCalled: func(options common.RootHashHolder) error { + return nil + }, + } + + n := getNodeWithAccount(accDB) + + recovAccnt, blockInfo, err := n.GetAccountWithKeys(testscommon.TestAddressBob, api.AccountQueryOptions{WithKeys: true}, context.Background()) + + require.Equal(t, expectedErr, err) + require.Equal(t, api.AccountResponse{}, recovAccnt) + require.Equal(t, api.BlockInfo{}, blockInfo) +} + +func TestNode_GetAccountAccountWithKeysShouldWork(t *testing.T) { + t.Parallel() + + accnt := createAcc(testscommon.TestPubKeyBob) + _ = accnt.AddToBalance(big.NewInt(1)) + + k1, v1 := []byte("key1"), []byte("value1") + k2, v2 := []byte("key2"), []byte("value2") + + accnt.SetDataTrie( + &trieMock.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, _ common.KeyBuilder, tlp common.TrieLeafParser) error { + go func() { + suffix := append(k1, accnt.AddressBytes()...) + trieLeaf, _ := tlp.ParseLeaf(k1, append(v1, suffix...), core.NotSpecified) + leavesChannels.LeavesChan <- trieLeaf + + suffix = append(k2, accnt.AddressBytes()...) 
+ trieLeaf2, _ := tlp.ParseLeaf(k2, append(v2, suffix...), core.NotSpecified) + leavesChannels.LeavesChan <- trieLeaf2 + + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + }() + + return nil + }, + RootCalled: func() ([]byte, error) { + return nil, nil + }, + }) + + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return accnt, nil, nil + }, + RecreateTrieCalled: func(options common.RootHashHolder) error { + return nil + }, + } + + n := getNodeWithAccount(accDB) + + recovAccnt, _, err := n.GetAccountWithKeys(testscommon.TestAddressBob, api.AccountQueryOptions{WithKeys: false}, context.Background()) + + require.Nil(t, err) + require.Nil(t, recovAccnt.Pairs) + + recovAccnt, _, err = n.GetAccountWithKeys(testscommon.TestAddressBob, api.AccountQueryOptions{WithKeys: true}, context.Background()) + + require.Nil(t, err) + require.NotNil(t, recovAccnt.Pairs) + require.Equal(t, 2, len(recovAccnt.Pairs)) + require.Equal(t, hex.EncodeToString(v1), recovAccnt.Pairs[hex.EncodeToString(k1)]) + require.Equal(t, hex.EncodeToString(v2), recovAccnt.Pairs[hex.EncodeToString(k2)]) +} + +func TestNode_GetAccountAccountWithKeysNeverUsedAccountShouldWork(t *testing.T) { + t.Parallel() + + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return nil, nil, nil + }, + RecreateTrieCalled: func(options common.RootHashHolder) error { + return nil + }, + } + + n := getNodeWithAccount(accDB) + + recovAccnt, blockInfo, err := n.GetAccountWithKeys(testscommon.TestAddressBob, api.AccountQueryOptions{WithKeys: true}, context.Background()) + + require.Nil(t, err) + require.Equal(t, uint64(0), recovAccnt.Nonce) + require.Equal(t, testscommon.TestAddressBob, recovAccnt.Address) + require.Equal(t, api.BlockInfo{}, blockInfo) +} + +func TestNode_GetAccountAccountWithKeysNilDataTrieShouldWork(t *testing.T) { + t.Parallel() + + accnt := createAcc(testscommon.TestPubKeyBob) + accnt.SetDataTrie(nil) + _ = accnt.AddToBalance(big.NewInt(1)) + + accDB := &stateMock.AccountsStub{ + GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { + return accnt, nil, nil + }, + RecreateTrieCalled: func(options common.RootHashHolder) error { + return nil + }, + } + + n := getNodeWithAccount(accDB) + + recovAccnt, blockInfo, err := n.GetAccountWithKeys(testscommon.TestAddressBob, api.AccountQueryOptions{WithKeys: true}, context.Background()) + + require.Nil(t, err) + require.Equal(t, uint64(0), recovAccnt.Nonce) + require.Equal(t, testscommon.TestAddressBob, recovAccnt.Address) + require.Equal(t, api.BlockInfo{}, blockInfo) +} + +func getNodeWithAccount(accDB *stateMock.AccountsStub) *node.Node { + coreComponents := getDefaultCoreComponents() + dataComponents := getDefaultDataComponents() + stateComponents := getDefaultStateComponents() + args := state.ArgsAccountsRepository{ + FinalStateAccountsWrapper: accDB, + CurrentStateAccountsWrapper: accDB, + HistoricalStateAccountsWrapper: accDB, + } + stateComponents.AccountsRepo, _ = state.NewAccountsRepository(args) + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithDataComponents(dataComponents), + node.WithStateComponents(stateComponents), + ) + return n +} + func TestNode_AppStatusHandlersShouldIncrement(t *testing.T) { 
t.Parallel() @@ -4058,7 +4215,7 @@ func TestNode_getProofErrWhenComputingProof(t *testing.T) { }, }, nil }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -4744,7 +4901,7 @@ func TestNode_GetGuardianData(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return testAccount, nil, nil }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -4785,7 +4942,7 @@ func TestNode_GetGuardianData(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return testAccount, nil, nil }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -4813,7 +4970,7 @@ func TestNode_GetGuardianData(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return nil, nil, state.NewErrAccountNotFoundAtBlock(providedBlockInfo) }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -4931,7 +5088,7 @@ func TestNode_GetGuardianData(t *testing.T) { GetAccountWithBlockInfoCalled: func(address []byte, options common.RootHashHolder) (vmcommon.AccountHandler, common.BlockInfo, error) { return acc, nil, nil }, - RecreateTrieCalled: func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return nil }, } @@ -5092,18 +5249,19 @@ func getDefaultCoreComponents() *nodeMockFactory.CoreComponentsMock { MinTransactionVersionCalled: func() uint32 { return 1 }, - WDTimer: &testscommon.WatchdogMock{}, - Alarm: &testscommon.AlarmSchedulerStub{}, - NtpTimer: &testscommon.SyncTimerStub{}, - RoundHandlerField: &testscommon.RoundHandlerMock{}, - EconomicsHandler: &economicsmocks.EconomicsHandlerMock{}, - APIEconomicsHandler: &economicsmocks.EconomicsHandlerMock{}, - RatingsConfig: &testscommon.RatingsInfoMock{}, - RatingHandler: &testscommon.RaterMock{}, - NodesConfig: &testscommon.NodesSetupStub{}, - StartTime: time.Time{}, - EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, - TxVersionCheckHandler: versioning.NewTxVersionChecker(0), + WDTimer: &testscommon.WatchdogMock{}, + Alarm: &testscommon.AlarmSchedulerStub{}, + NtpTimer: &testscommon.SyncTimerStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerMock{}, + APIEconomicsHandler: &economicsmocks.EconomicsHandlerMock{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, + RatingHandler: &testscommon.RaterMock{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, + StartTime: time.Time{}, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + TxVersionCheckHandler: versioning.NewTxVersionChecker(0), + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } } @@ -5125,8 +5283,8 @@ func getDefaultProcessComponents() *factoryMock.ProcessComponentsMock { BootSore: &mock.BootstrapStorerMock{}, HeaderSigVerif: &mock.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorMock{}, - ValidatorProvider: &mock.ValidatorsProviderStub{}, + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{}, + ValidatorProvider: 
&stakingcommon.ValidatorsProviderStub{}, BlockTrack: &mock.BlockTrackerStub{}, PendingMiniBlocksHdl: &mock.PendingMiniBlocksHandlerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, @@ -5139,6 +5297,7 @@ func getDefaultProcessComponents() *factoryMock.ProcessComponentsMock { TxsSenderHandlerField: &txsSenderMock.TxsSenderHandlerMock{}, ScheduledTxsExecutionHandlerInternal: &testscommon.ScheduledTxsExecutionStub{}, HistoryRepositoryInternal: &dblookupext.HistoryRepositoryStub{}, + RelayedTxV3ProcessorField: &processMocks.RelayedTxV3ProcessorMock{}, } } diff --git a/node/trieIterators/delegatedListProcessor_test.go b/node/trieIterators/delegatedListProcessor_test.go index 4718349dcf7..2a92b3a2d9f 100644 --- a/node/trieIterators/delegatedListProcessor_test.go +++ b/node/trieIterators/delegatedListProcessor_test.go @@ -118,7 +118,7 @@ func TestDelegatedListProc_GetDelegatorsListContextShouldTimeout(t *testing.T) { GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { return createScAccount(addressContainer, delegators, addressContainer, time.Second), nil }, - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -165,7 +165,7 @@ func TestDelegatedListProc_GetDelegatorsListShouldWork(t *testing.T) { GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { return createScAccount(addressContainer, delegators, addressContainer, 0), nil }, - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } diff --git a/node/trieIterators/directStakedListProcessor_test.go b/node/trieIterators/directStakedListProcessor_test.go index 552ce65d218..07495736455 100644 --- a/node/trieIterators/directStakedListProcessor_test.go +++ b/node/trieIterators/directStakedListProcessor_test.go @@ -73,7 +73,7 @@ func TestDirectStakedListProc_GetDelegatorsListContextShouldTimeout(t *testing.T GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { return createScAccount(addressContainer, validators, addressContainer, time.Second), nil }, - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -117,7 +117,7 @@ func TestDirectStakedListProc_GetDelegatorsListShouldWork(t *testing.T) { GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { return createScAccount(addressContainer, validators, addressContainer, 0), nil }, - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } diff --git a/node/trieIterators/stakeValuesProcessor_test.go b/node/trieIterators/stakeValuesProcessor_test.go index 43c991ff6d7..fa3bc870cdf 100644 --- a/node/trieIterators/stakeValuesProcessor_test.go +++ b/node/trieIterators/stakeValuesProcessor_test.go @@ -121,7 +121,7 @@ func TestTotalStakedValueProcessor_GetTotalStakedValue_CannotGetAccount(t *testi expectedErr := errors.New("expected error") arg := createMockArgs() arg.Accounts.AccountsAdapter = &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { @@ -162,7 +162,7 @@ func TestTotalStakedValueProcessor_GetTotalStakedValue_CannotCastAccount(t *test 
GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { return nil, nil }, - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -189,7 +189,7 @@ func TestTotalStakedValueProcessor_GetTotalStakedValue_CannotGetRootHash(t *test GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { return acc, nil }, - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -221,7 +221,7 @@ func TestTotalStakedValueProcessor_GetTotalStakedValue_ContextShouldTimeout(t *t GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { return acc, nil }, - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -256,7 +256,7 @@ func TestTotalStakedValueProcessor_GetTotalStakedValue_CannotGetAllLeaves(t *tes GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { return acc, nil }, - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -327,7 +327,7 @@ func TestTotalStakedValueProcessor_GetTotalStakedValue(t *testing.T) { GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { return acc, nil }, - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } diff --git a/outport/process/alteredaccounts/alteredAccountsProvider.go b/outport/process/alteredaccounts/alteredAccountsProvider.go index e7d855b1ebf..c95fea79b69 100644 --- a/outport/process/alteredaccounts/alteredAccountsProvider.go +++ b/outport/process/alteredaccounts/alteredAccountsProvider.go @@ -223,6 +223,7 @@ func (aap *alteredAccountsProvider) addTokensDataForMarkedAccount( Nonce: nonce, Properties: hex.EncodeToString(esdtToken.Properties), MetaData: aap.convertMetaData(esdtToken.TokenMetaData), + Type: getTokenType(esdtToken.Type, nonce), } if options.WithAdditionalOutportData { accountTokenData.AdditionalData = &alteredAccount.AdditionalAccountTokenData{ @@ -236,6 +237,16 @@ func (aap *alteredAccountsProvider) addTokensDataForMarkedAccount( return nil } +func getTokenType(tokenType uint32, tokenNonce uint64) string { + isNotFungible := tokenNonce != 0 + tokenTypeNotSet := isNotFungible && core.ESDTType(tokenType) == core.NonFungible + if tokenTypeNotSet { + return "" + } + + return core.ESDTType(tokenType).String() +} + func (aap *alteredAccountsProvider) convertMetaData(metaData *esdt.MetaData) *alteredAccount.TokenMetaData { if metaData == nil { return nil diff --git a/outport/process/alteredaccounts/alteredAccountsProvider_test.go b/outport/process/alteredaccounts/alteredAccountsProvider_test.go index 7832e6e55bb..c6fce787c71 100644 --- a/outport/process/alteredaccounts/alteredAccountsProvider_test.go +++ b/outport/process/alteredaccounts/alteredAccountsProvider_test.go @@ -620,6 +620,7 @@ func testExtractAlteredAccountsFromPoolShouldIncludeESDT(t *testing.T) { Nonce: 0, Properties: "6f6b", MetaData: nil, + Type: core.FungibleESDT, }, res[encodedAddr].Tokens[0]) } @@ -627,6 +628,7 @@ func testExtractAlteredAccountsFromPoolShouldIncludeNFT(t *testing.T) { t.Parallel() expectedToken := esdt.ESDigitalToken{ + Type: uint32(core.NonFungible), Value: big.NewInt(37), TokenMetaData: 
&esdt.MetaData{ Nonce: 38, @@ -757,6 +759,7 @@ func testExtractAlteredAccountsFromPoolShouldIncludeDestinationFromTokensLogsTop receiverOnDestination := []byte("receiver on destination shard") expectedToken := esdt.ESDigitalToken{ Value: big.NewInt(37), + Type: uint32(core.NonFungible), TokenMetaData: &esdt.MetaData{ Nonce: 38, Name: []byte("name"), @@ -903,12 +906,14 @@ func testExtractAlteredAccountsFromPoolMultiTransferEventV2(t *testing.T) { TokenMetaData: &esdt.MetaData{ Nonce: 1, }, + Type: uint32(core.NonFungible), } expectedToken2 := &esdt.ESDigitalToken{ Value: big.NewInt(10), TokenMetaData: &esdt.MetaData{ Nonce: 1, }, + Type: uint32(core.NonFungible), } args := getMockArgs() @@ -1003,6 +1008,7 @@ func testExtractAlteredAccountsFromPoolAddressHasMultipleNfts(t *testing.T) { } expectedToken1 := esdt.ESDigitalToken{ Value: big.NewInt(38), + Type: uint32(core.NonFungible), TokenMetaData: &esdt.MetaData{ Nonce: 5, Name: []byte("nft-0"), @@ -1010,6 +1016,7 @@ func testExtractAlteredAccountsFromPoolAddressHasMultipleNfts(t *testing.T) { } expectedToken2 := esdt.ESDigitalToken{ Value: big.NewInt(37), + Type: uint32(core.NonFungible), TokenMetaData: &esdt.MetaData{ Nonce: 6, Name: []byte("nft-0"), @@ -1124,6 +1131,7 @@ func testExtractAlteredAccountsFromPoolAddressHasMultipleNfts(t *testing.T) { Balance: expectedToken0.Value.String(), Nonce: 0, MetaData: nil, + Type: core.FungibleESDT, }) require.Contains(t, res[encodedAddr].Tokens, &alteredAccount.AccountTokenData{ @@ -1222,6 +1230,7 @@ func testExtractAlteredAccountsFromPoolESDTTransferBalanceNotChanged(t *testing. AdditionalData: &alteredAccount.AdditionalAccountTokenData{ IsNFTCreate: false, }, + Type: core.FungibleESDT, }, }, AdditionalData: &alteredAccount.AdditionalAccountData{ @@ -1241,6 +1250,7 @@ func testExtractAlteredAccountsFromPoolESDTTransferBalanceNotChanged(t *testing. 
 						AdditionalData: &alteredAccount.AdditionalAccountTokenData{
 							IsNFTCreate: false,
 						},
+						Type: core.FungibleESDT,
 					},
 				},
 				AdditionalData: &alteredAccount.AdditionalAccountData{
@@ -1432,6 +1442,7 @@ func textExtractAlteredAccountsFromPoolNftCreate(t *testing.T) {
 					AdditionalData: &alteredAccount.AdditionalAccountTokenData{
 						IsNFTCreate: true,
 					},
+					Type: core.FungibleESDT,
 				},
 			},
 			AdditionalData: &alteredAccount.AdditionalAccountData{
diff --git a/outport/process/alteredaccounts/tokensProcessor.go b/outport/process/alteredaccounts/tokensProcessor.go
index bb0839ef44a..687c543bcdf 100644
--- a/outport/process/alteredaccounts/tokensProcessor.go
+++ b/outport/process/alteredaccounts/tokensProcessor.go
@@ -7,6 +7,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/data"
 	outportcore "github.com/multiversx/mx-chain-core-go/data/outport"
 	"github.com/multiversx/mx-chain-go/sharding"
+	vmcommon "github.com/multiversx/mx-chain-vm-common-go"
 )
 
 const (
@@ -116,7 +117,7 @@ func (tp *tokensProcessor) processMultiTransferEvent(event data.EventHandler, ma
 	// N = len(topics)
 	// i := 0; i < N-1; i+=3
 	// {
-	// 	topics[i] --- token identifier
+	// 	topics[i] --- token identifier or EGLD token identifier
 	// 	topics[i+1] --- token nonce
 	// 	topics[i+2] --- transferred value
 	// }
@@ -133,6 +134,12 @@
 	for i := 0; i < numOfTopics-1; i += 3 {
 		tokenID := topics[i]
 		nonceBigInt := big.NewInt(0).SetBytes(topics[i+1])
+
+		if string(tokenID) == vmcommon.EGLDIdentifier {
+			tp.processNativeEGLDTransferWithMultiTransfer(destinationAddress, markedAlteredAccounts)
+			return
+		}
+
 		// process event for the sender address
 		tp.processEsdtDataForAddress(address, nonceBigInt, string(tokenID), markedAlteredAccounts, false)
 
@@ -177,6 +184,24 @@
 	}
 }
 
+func (tp *tokensProcessor) processNativeEGLDTransferWithMultiTransfer(address []byte, markedAlteredAccounts map[string]*markedAlteredAccount) {
+	if !tp.isSameShard(address) {
+		return
+	}
+
+	addressStr := string(address)
+	_, addressAlreadySelected := markedAlteredAccounts[addressStr]
+	if addressAlreadySelected {
+		markedAlteredAccounts[addressStr].balanceChanged = true
+		return
+	}
+
+	markedAlteredAccounts[addressStr] = &markedAlteredAccount{
+		balanceChanged: true,
+	}
+
+}
+
 func (tp *tokensProcessor) isSameShard(address []byte) bool {
 	return tp.shardCoordinator.SelfId() == tp.shardCoordinator.ComputeId(address)
 }
diff --git a/outport/process/alteredaccounts/tokensProcessor_test.go b/outport/process/alteredaccounts/tokensProcessor_test.go
index a7a6a65af96..af737a1de94 100644
--- a/outport/process/alteredaccounts/tokensProcessor_test.go
+++ b/outport/process/alteredaccounts/tokensProcessor_test.go
@@ -7,6 +7,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-core-go/data/transaction"
 	"github.com/multiversx/mx-chain-go/process/mock"
+	vmcommon "github.com/multiversx/mx-chain-vm-common-go"
 	"github.com/stretchr/testify/require"
 )
 
@@ -61,3 +62,38 @@ func TestTokenProcessorProcessEventMultiTransferV2(t *testing.T) {
 	require.Equal(t, markedAccount, markedAccounts["addr"])
 	require.Equal(t, markedAccount, markedAccounts["receiver"])
 }
+
+func TestTokenProcessorProcessEventMultiTransferV2WithEGLD(t *testing.T) {
+	t.Parallel()
+
+	tp := 
newTokensProcessor(&mock.ShardCoordinatorStub{}) + + markedAccounts := make(map[string]*markedAlteredAccount) + tp.processEvent(&transaction.Event{ + Identifier: []byte(core.BuiltInFunctionMultiESDTNFTTransfer), + Address: []byte("addr"), + Topics: [][]byte{[]byte("token1"), big.NewInt(0).Bytes(), []byte("2"), []byte(vmcommon.EGLDIdentifier), big.NewInt(0).Bytes(), []byte("3"), []byte("receiver")}, + }, markedAccounts) + + require.Equal(t, 2, len(markedAccounts)) + markedAccount1 := &markedAlteredAccount{ + tokens: map[string]*markedAlteredAccountToken{ + "token1": { + identifier: "token1", + nonce: 0, + }, + }, + } + require.Equal(t, markedAccount1, markedAccounts["addr"]) + + markedAccount2 := &markedAlteredAccount{ + balanceChanged: true, + tokens: map[string]*markedAlteredAccountToken{ + "token1": { + identifier: "token1", + nonce: 0, + }, + }, + } + require.Equal(t, markedAccount2, markedAccounts["receiver"]) +} diff --git a/outport/process/interface.go b/outport/process/interface.go index abcbbe10fec..5fcb19020f3 100644 --- a/outport/process/interface.go +++ b/outport/process/interface.go @@ -34,6 +34,7 @@ type GasConsumedProvider interface { type EconomicsDataHandler interface { ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int + ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 IsInterfaceNil() bool MaxGasLimitPerBlock(shardID uint32) uint64 diff --git a/outport/process/transactionsfee/interface.go b/outport/process/transactionsfee/interface.go index fa09f18076a..53042467442 100644 --- a/outport/process/transactionsfee/interface.go +++ b/outport/process/transactionsfee/interface.go @@ -12,6 +12,7 @@ import ( type FeesProcessorHandler interface { ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int + ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 IsInterfaceNil() bool } diff --git a/outport/process/transactionsfee/transactionsFeeProcessor.go b/outport/process/transactionsfee/transactionsFeeProcessor.go index 593a5d6b83b..6520db7635d 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor.go @@ -90,7 +90,7 @@ func (tep *transactionsFeeProcessor) PutFeeAndGasUsed(pool *outportcore.Transact func (tep *transactionsFeeProcessor) prepareInvalidTxs(pool *outportcore.TransactionPool) { for _, invalidTx := range pool.InvalidTxs { - fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(invalidTx.Transaction, invalidTx.Transaction.GasLimit) + fee := tep.txFeeCalculator.ComputeTxFee(invalidTx.Transaction) invalidTx.FeeInfo.SetGasUsed(invalidTx.Transaction.GetGasLimit()) invalidTx.FeeInfo.SetFee(fee) invalidTx.FeeInfo.SetInitialPaidFee(fee) @@ -103,7 +103,7 @@ func (tep *transactionsFeeProcessor) prepareNormalTxs(transactionsAndScrs *trans gasUsed := tep.txFeeCalculator.ComputeGasLimit(txHandler) fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txHandler, gasUsed) - initialPaidFee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txHandler, txHandler.GetGasLimit()) + initialPaidFee := tep.txFeeCalculator.ComputeTxFee(txHandler) feeInfo := txWithResult.GetFeeInfo() 
 	feeInfo.SetGasUsed(gasUsed)
@@ -115,6 +115,11 @@ func (tep *transactionsFeeProcessor) prepareNormalTxs(transactionsAndScrs *trans
 			feeInfo.SetFee(initialPaidFee)
 		}
 
+		if len(txHandler.GetUserTransactions()) > 0 {
+			tep.prepareRelayedTxV3WithResults(txHashHex, txWithResult)
+			continue
+		}
+
 		tep.prepareTxWithResults(txHashHex, txWithResult)
 	}
 }
@@ -137,15 +142,48 @@ func (tep *transactionsFeeProcessor) prepareTxWithResults(txHashHex string, txWi
 		}
 	}
 
-	tep.prepareTxWithResultsBasedOnLogs(txWithResults, hasRefund)
+	tep.prepareTxWithResultsBasedOnLogs(txHashHex, txWithResults, hasRefund)
+
+}
+
+func (tep *transactionsFeeProcessor) prepareRelayedTxV3WithResults(txHashHex string, txWithResults *transactionWithResults) {
+	refundsValue := big.NewInt(0)
+	for _, scrHandler := range txWithResults.scrs {
+		scr, ok := scrHandler.GetTxHandler().(*smartContractResult.SmartContractResult)
+		if !ok {
+			continue
+		}
+
+		if !isRefundForRelayed(scr, txWithResults.GetTxHandler()) {
+			continue
+		}
+
+		refundsValue.Add(refundsValue, scr.Value)
+	}
+
+	gasUsed, fee := tep.txFeeCalculator.ComputeGasUsedAndFeeBasedOnRefundValue(txWithResults.GetTxHandler(), refundsValue)
+
+	txWithResults.GetFeeInfo().SetGasUsed(gasUsed)
+	txWithResults.GetFeeInfo().SetFee(fee)
+
+	hasRefunds := refundsValue.Cmp(big.NewInt(0)) == 1
+	tep.prepareTxWithResultsBasedOnLogs(txHashHex, txWithResults, hasRefunds)
 }
 
 func (tep *transactionsFeeProcessor) prepareTxWithResultsBasedOnLogs(
+	txHashHex string,
 	txWithResults *transactionWithResults,
 	hasRefund bool,
 ) {
-	if check.IfNilReflect(txWithResults.log) {
+	tx := txWithResults.GetTxHandler()
+	if check.IfNil(tx) {
+		tep.log.Warn("tep.prepareTxWithResultsBasedOnLogs nil transaction handler", "txHash", txHashHex)
+		return
+	}
+
+	res := tep.dataFieldParser.Parse(tx.GetData(), tx.GetSndAddr(), tx.GetRcvAddr(), tep.shardCoordinator.NumberOfShards())
+	if check.IfNilReflect(txWithResults.log) || (res.Function == "" && res.Operation == datafield.OperationTransfer) {
 		return
 	}
diff --git a/outport/process/transactionsfee/transactionsFeeProcessor_test.go b/outport/process/transactionsfee/transactionsFeeProcessor_test.go
index e0efbab8ada..8ff4cf14501 100644
--- a/outport/process/transactionsfee/transactionsFeeProcessor_test.go
+++ b/outport/process/transactionsfee/transactionsFeeProcessor_test.go
@@ -212,11 +212,15 @@ func TestPutFeeAndGasUsedInvalidTxs(t *testing.T) {
 func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) {
 	t.Parallel()
 
+	receiver, _ := hex.DecodeString("00000000000000000500d3b28828d62052124f07dcd50ed31b0825f60eee1526")
 	tx1Hash := "h1"
 	tx1 := &outportcore.TxInfo{
 		Transaction: &transaction.Transaction{
 			GasLimit: 30000000,
 			GasPrice: 1000000000,
+			SndAddr:  []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"),
+			RcvAddr:  receiver,
+			Data:     []byte("here"),
 		},
 		FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)},
 	}
@@ -226,6 +230,9 @@
 		Transaction: &transaction.Transaction{
 			GasLimit: 50000000,
 			GasPrice: 1000000000,
+			SndAddr:  []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"),
+			RcvAddr:  receiver,
+			Data:     []byte("here"),
 		},
 		FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)},
 	}
@@ -520,3 +527,59 @@ func TestPutFeeAndGasUsedScrWithRefund(t *testing.T) {
 	require.Equal(t, big.NewInt(552865000000000), initialTx.GetFeeInfo().GetFee())
 	require.Equal(t, uint64(50_336_500), initialTx.GetFeeInfo().GetGasUsed())
 }
+
+func TestMoveBalanceWithSignalError(t *testing.T) {
+	txHash := 
[]byte("e3cdb8b4936fdbee2d3b1244b4c49959df5f90ada683d650019d244e5a64afaf") + initialTx := &outportcore.TxInfo{Transaction: &transaction.Transaction{ + Nonce: 1004, + GasLimit: 12_175_500, + GasPrice: 1000000000, + SndAddr: []byte("erd1s8jr8e8hsvv7c9ehmshcjlpzf9ua5l50qeswa8feshrp6xlz9c7quacmtx"), + RcvAddr: []byte("erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqzllls8a5w6u"), + Data: []byte("start@5465737420526166666c65203120f09f9a80@10000000000000000@0100000002@01000000006082a400@0100000001@01000000023232@"), + }, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}} + + scrHash := []byte("scrHash") + scr := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 1005, + SndAddr: []byte("erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqzllls8a5w6u"), + RcvAddr: []byte("erd1s8jr8e8hsvv7c9ehmshcjlpzf9ua5l50qeswa8feshrp6xlz9c7quacmtx"), + PrevTxHash: txHash, + OriginalTxHash: txHash, + Value: big.NewInt(0), + Data: []byte("@sending value to non payable contract"), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } + + pool := &outportcore.TransactionPool{ + SmartContractResults: map[string]*outportcore.SCRInfo{ + hex.EncodeToString(scrHash): scr, + }, + Transactions: map[string]*outportcore.TxInfo{ + hex.EncodeToString(txHash): initialTx, + }, + Logs: []*outportcore.LogData{ + { + Log: &transaction.Log{ + Events: []*transaction.Event{ + { + Identifier: []byte(core.SignalErrorOperation), + }, + }, + }, + TxHash: hex.EncodeToString(txHash), + }, + }, + } + + arg := prepareMockArg() + txsFeeProc, err := NewTransactionsFeeProcessor(arg) + require.NotNil(t, txsFeeProc) + require.Nil(t, err) + + err = txsFeeProc.PutFeeAndGasUsed(pool) + require.Nil(t, err) + require.Equal(t, uint64(225_500), initialTx.GetFeeInfo().GetGasUsed()) +} diff --git a/p2p/disabled/networkMessenger.go b/p2p/disabled/networkMessenger.go index 0216ccdd797..4f854d976bc 100644 --- a/p2p/disabled/networkMessenger.go +++ b/p2p/disabled/networkMessenger.go @@ -175,11 +175,6 @@ func (netMes *networkMessenger) SignUsingPrivateKey(_ []byte, _ []byte) ([]byte, return make([]byte, 0), nil } -// AddPeerTopicNotifier returns nil as it is disabled -func (netMes *networkMessenger) AddPeerTopicNotifier(_ p2p.PeerTopicNotifier) error { - return nil -} - // ProcessReceivedMessage returns nil as it is disabled func (netMes *networkMessenger) ProcessReceivedMessage(_ p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { return nil @@ -190,6 +185,11 @@ func (netMes *networkMessenger) SetDebugger(_ p2p.Debugger) error { return nil } +// HasCompatibleProtocolID returns false as it is disabled +func (netMes *networkMessenger) HasCompatibleProtocolID(_ string) bool { + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (netMes *networkMessenger) IsInterfaceNil() bool { return netMes == nil diff --git a/p2p/interface.go b/p2p/interface.go index dbdabc4248c..e2f7130c6ae 100644 --- a/p2p/interface.go +++ b/p2p/interface.go @@ -102,9 +102,6 @@ type PeersRatingHandler interface { // PeersRatingMonitor represents an entity able to provide peers ratings type PeersRatingMonitor = p2p.PeersRatingMonitor -// PeerTopicNotifier represents an entity able to handle new notifications on a new peer on a topic -type PeerTopicNotifier = p2p.PeerTopicNotifier - // P2PSigningHandler defines the behaviour of a component able to verify p2p message signature type P2PSigningHandler interface { Verify(message MessageP2P) error diff --git 
a/p2p/mock/peerTopicNotifierStub.go b/p2p/mock/peerTopicNotifierStub.go deleted file mode 100644 index bc1446ae819..00000000000 --- a/p2p/mock/peerTopicNotifierStub.go +++ /dev/null @@ -1,20 +0,0 @@ -package mock - -import "github.com/multiversx/mx-chain-core-go/core" - -// PeerTopicNotifierStub - -type PeerTopicNotifierStub struct { - NewPeerFoundCalled func(pid core.PeerID, topic string) -} - -// NewPeerFound - -func (stub *PeerTopicNotifierStub) NewPeerFound(pid core.PeerID, topic string) { - if stub.NewPeerFoundCalled != nil { - stub.NewPeerFoundCalled(pid, topic) - } -} - -// IsInterfaceNil - -func (stub *PeerTopicNotifierStub) IsInterfaceNil() bool { - return stub == nil -} diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index 703d6326b40..df929214829 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -93,6 +93,7 @@ type ArgBaseProcessor struct { ReceiptsRepository receiptsRepository BlockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler ManagedPeersHolder common.ManagedPeersHolder + SentSignaturesTracker process.SentSignaturesTracker } // ArgShardProcessor holds all dependencies required by the process data factory in order to create diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 5049cece729..0e3c573b23d 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -20,6 +20,8 @@ import ( "github.com/multiversx/mx-chain-core-go/display" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + logger "github.com/multiversx/mx-chain-logger-go" + nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" @@ -35,13 +37,13 @@ import ( "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/process/block/processedMb" + "github.com/multiversx/mx-chain-go/process/headerCheck" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/parsers" "github.com/multiversx/mx-chain-go/storage/storageunit" - logger "github.com/multiversx/mx-chain-logger-go" ) var log = logger.GetOrCreate("process/block") @@ -89,6 +91,7 @@ type baseProcessor struct { processDebugger process.Debugger processStatusHandler common.ProcessStatusHandler managedPeersHolder common.ManagedPeersHolder + sentSignaturesTracker process.SentSignaturesTracker versionedHeaderFactory nodeFactory.VersionedHeaderFactory headerIntegrityVerifier process.HeaderIntegrityVerifier @@ -119,6 +122,7 @@ type baseProcessor struct { mutNonceOfFirstCommittedBlock sync.RWMutex nonceOfFirstCommittedBlock core.OptionalUint64 + extraDelayRequestBlockInfo time.Duration } type bootStorerDataArgs struct { @@ -559,6 +563,9 @@ func checkProcessorParameters(arguments ArgBaseProcessor) error { if check.IfNil(arguments.ManagedPeersHolder) { return process.ErrNilManagedPeersHolder } + if 
check.IfNil(arguments.SentSignaturesTracker) {
+		return process.ErrNilSentSignatureTracker
+	}
 	return nil
 }
 
@@ -570,7 +577,7 @@ func (bp *baseProcessor) createBlockStarted() error {
 	bp.txCoordinator.CreateBlockStarted()
 	bp.feeHandler.CreateBlockStarted(scheduledGasAndFees)
 
-	err := bp.txCoordinator.AddIntermediateTransactions(bp.scheduledTxsExecutionHandler.GetScheduledIntermediateTxs())
+	err := bp.txCoordinator.AddIntermediateTransactions(bp.scheduledTxsExecutionHandler.GetScheduledIntermediateTxs(), nil)
 	if err != nil {
 		return err
 	}
@@ -1680,7 +1687,7 @@ func (bp *baseProcessor) requestMiniBlocksIfNeeded(headerHandler data.HeaderHand
 		return
 	}
 
-	waitTime := common.ExtraDelayForRequestBlockInfo
+	waitTime := bp.extraDelayRequestBlockInfo
 	roundDifferences := bp.roundHandler.Index() - int64(headerHandler.GetRound())
 	if roundDifferences > 1 {
 		waitTime = 0
@@ -2110,3 +2117,23 @@ func (bp *baseProcessor) setNonceOfFirstCommittedBlock(nonce uint64) {
 	bp.nonceOfFirstCommittedBlock.HasValue = true
 	bp.nonceOfFirstCommittedBlock.Value = nonce
 }
+
+func (bp *baseProcessor) checkSentSignaturesAtCommitTime(header data.HeaderHandler) error {
+	validatorsGroup, err := headerCheck.ComputeConsensusGroup(header, bp.nodesCoordinator)
+	if err != nil {
+		return err
+	}
+
+	consensusGroup := make([]string, 0, len(validatorsGroup))
+	for _, validator := range validatorsGroup {
+		consensusGroup = append(consensusGroup, string(validator.PubKey()))
+	}
+
+	signers := headerCheck.ComputeSignersPublicKeys(consensusGroup, header.GetPubKeysBitmap())
+
+	for _, signer := range signers {
+		bp.sentSignaturesTracker.ResetCountersForManagedBlockSigner([]byte(signer))
+	}
+
+	return nil
+}
diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go
index 2cf37208f6b..f88d7e8d667 100644
--- a/process/block/baseProcess_test.go
+++ b/process/block/baseProcess_test.go
@@ -34,6 +34,7 @@ import (
 	"github.com/multiversx/mx-chain-go/process/block/processedMb"
 	"github.com/multiversx/mx-chain-go/process/coordinator"
 	"github.com/multiversx/mx-chain-go/process/mock"
+	"github.com/multiversx/mx-chain-go/sharding/nodesCoordinator"
 	"github.com/multiversx/mx-chain-go/state"
 	"github.com/multiversx/mx-chain-go/storage"
 	"github.com/multiversx/mx-chain-go/storage/database"
@@ -73,7 +74,7 @@ func createArgBaseProcessor(
 	bootstrapComponents *mock.BootstrapComponentsMock,
 	statusComponents *mock.StatusComponentsMock,
 ) blproc.ArgBaseProcessor {
-	nodesCoordinator := shardingMocks.NewNodesCoordinatorMock()
+	nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock()
 	argsHeaderValidator := blproc.ArgsHeaderValidator{
 		Hasher:      &hashingMocks.HasherMock{},
 		Marshalizer: &mock.MarshalizerMock{},
@@ -102,7 +103,7 @@
 		Config:           config.Config{},
 		AccountsDB:       accountsDb,
 		ForkDetector:     &mock.ForkDetectorMock{},
-		NodesCoordinator: nodesCoordinator,
+		NodesCoordinator: nodesCoordinatorInstance,
 		FeeHandler:       &mock.FeeAccumulatorStub{},
 		RequestHandler:   &testscommon.RequestHandlerStub{},
 		BlockChainHook:   &testscommon.BlockChainHookStub{},
@@ -126,6 +127,7 @@
 		ReceiptsRepository:           &testscommon.ReceiptsRepositoryStub{},
 		BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{},
 		ManagedPeersHolder:           &testscommon.ManagedPeersHolderStub{},
+		SentSignaturesTracker:        &testscommon.SentSignatureTrackerStub{},
 	}
 }
@@ -1033,7 +1035,7 @@ func 
TestBaseProcessor_RevertStateRecreateTrieFailsShouldErr(t *testing.T) { expectedErr := errors.New("err") arguments := CreateMockArguments(createComponentHolderMocks()) arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return expectedErr }, } @@ -3112,3 +3114,54 @@ func TestBaseProcessor_ConcurrentCallsNonceOfFirstCommittedBlock(t *testing.T) { assert.True(t, len(values) <= 1) // we can have the situation when all reads are done before the first set assert.Equal(t, numCalls/2, values[lastValRead]+noValues) } + +func TestBaseProcessor_CheckSentSignaturesAtCommitTime(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + t.Run("nodes coordinator errors, should return error", func(t *testing.T) { + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return nil, expectedErr + } + + arguments := CreateMockArguments(createComponentHolderMocks()) + arguments.SentSignaturesTracker = &testscommon.SentSignatureTrackerStub{ + ResetCountersForManagedBlockSignerCalled: func(signerPk []byte) { + assert.Fail(t, "should have not called ResetCountersManagedBlockSigners") + }, + } + arguments.NodesCoordinator = nodesCoordinatorInstance + bp, _ := blproc.NewShardProcessor(arguments) + + err := bp.CheckSentSignaturesAtCommitTime(&block.Header{}) + assert.Equal(t, expectedErr, err) + }) + t.Run("should work with bitmap", func(t *testing.T) { + validator0, _ := nodesCoordinator.NewValidator([]byte("pk0"), 0, 0) + validator1, _ := nodesCoordinator.NewValidator([]byte("pk1"), 1, 1) + validator2, _ := nodesCoordinator.NewValidator([]byte("pk2"), 2, 2) + + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return []nodesCoordinator.Validator{validator0, validator1, validator2}, nil + } + + resetCountersCalled := make([][]byte, 0) + arguments := CreateMockArguments(createComponentHolderMocks()) + arguments.SentSignaturesTracker = &testscommon.SentSignatureTrackerStub{ + ResetCountersForManagedBlockSignerCalled: func(signerPk []byte) { + resetCountersCalled = append(resetCountersCalled, signerPk) + }, + } + arguments.NodesCoordinator = nodesCoordinatorInstance + bp, _ := blproc.NewShardProcessor(arguments) + + err := bp.CheckSentSignaturesAtCommitTime(&block.Header{ + PubKeysBitmap: []byte{0b00000101}, + }) + assert.Nil(t, err) + + assert.Equal(t, [][]byte{validator0.PubKey(), validator2.PubKey()}, resetCountersCalled) + }) +} diff --git a/process/block/export_test.go b/process/block/export_test.go index c8da250cba6..2332115613c 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" @@ -167,6 
+168,7 @@ func NewShardProcessorEmptyWith3shards( ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, }, } shardProc, err := NewShardProcessor(arguments) @@ -181,6 +183,10 @@ func (mp *metaProcessor) ReceivedShardHeader(header data.HeaderHandler, shardHea mp.receivedShardHeader(header, shardHeaderHash) } +func (mp *metaProcessor) GetDataPool() dataRetriever.PoolsHolder { + return mp.dataPool +} + func (mp *metaProcessor) AddHdrHashToRequestedList(hdr data.HeaderHandler, hdrHash []byte) { mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() defer mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() @@ -559,3 +565,144 @@ func (mp *metaProcessor) GetAllMarshalledTxs(body *block.Body) map[string][][]by func (bp *baseProcessor) SetNonceOfFirstCommittedBlock(nonce uint64) { bp.setNonceOfFirstCommittedBlock(nonce) } + +// CheckSentSignaturesAtCommitTime - +func (bp *baseProcessor) CheckSentSignaturesAtCommitTime(header data.HeaderHandler) error { + return bp.checkSentSignaturesAtCommitTime(header) +} + +// GetHdrForBlock - +func (mp *metaProcessor) GetHdrForBlock() *hdrForBlock { + return mp.hdrsForCurrBlock +} + +// ChannelReceiveAllHeaders - +func (mp *metaProcessor) ChannelReceiveAllHeaders() chan bool { + return mp.chRcvAllHdrs +} + +// ComputeExistingAndRequestMissingShardHeaders - +func (mp *metaProcessor) ComputeExistingAndRequestMissingShardHeaders(metaBlock *block.MetaBlock) (uint32, uint32) { + return mp.computeExistingAndRequestMissingShardHeaders(metaBlock) +} + +// ComputeExistingAndRequestMissingMetaHeaders - +func (sp *shardProcessor) ComputeExistingAndRequestMissingMetaHeaders(header data.ShardHeaderHandler) (uint32, uint32) { + return sp.computeExistingAndRequestMissingMetaHeaders(header) +} + +// GetHdrForBlock - +func (sp *shardProcessor) GetHdrForBlock() *hdrForBlock { + return sp.hdrsForCurrBlock +} + +// ChannelReceiveAllHeaders - +func (sp *shardProcessor) ChannelReceiveAllHeaders() chan bool { + return sp.chRcvAllMetaHdrs +} + +// InitMaps - +func (hfb *hdrForBlock) InitMaps() { + hfb.initMaps() + hfb.resetMissingHdrs() +} + +// Clone - +func (hfb *hdrForBlock) Clone() *hdrForBlock { + return hfb +} + +// SetNumMissingHdrs - +func (hfb *hdrForBlock) SetNumMissingHdrs(num uint32) { + hfb.mutHdrsForBlock.Lock() + hfb.missingHdrs = num + hfb.mutHdrsForBlock.Unlock() +} + +// SetNumMissingFinalityAttestingHdrs - +func (hfb *hdrForBlock) SetNumMissingFinalityAttestingHdrs(num uint32) { + hfb.mutHdrsForBlock.Lock() + hfb.missingFinalityAttestingHdrs = num + hfb.mutHdrsForBlock.Unlock() +} + +// SetHighestHdrNonce - +func (hfb *hdrForBlock) SetHighestHdrNonce(shardId uint32, nonce uint64) { + hfb.mutHdrsForBlock.Lock() + hfb.highestHdrNonce[shardId] = nonce + hfb.mutHdrsForBlock.Unlock() +} + +// HdrInfo - +type HdrInfo struct { + UsedInBlock bool + Hdr data.HeaderHandler +} + +// SetHdrHashAndInfo - +func (hfb *hdrForBlock) SetHdrHashAndInfo(hash string, info *HdrInfo) { + hfb.mutHdrsForBlock.Lock() + hfb.hdrHashAndInfo[hash] = &hdrInfo{ + hdr: info.Hdr, + usedInBlock: info.UsedInBlock, + } + hfb.mutHdrsForBlock.Unlock() +} + +// GetHdrHashMap - +func (hfb *hdrForBlock) GetHdrHashMap() map[string]data.HeaderHandler { + m := make(map[string]data.HeaderHandler) + + hfb.mutHdrsForBlock.RLock() + for hash, hi := range hfb.hdrHashAndInfo { + m[hash] = hi.hdr + } + hfb.mutHdrsForBlock.RUnlock() + + 
return m +} + +// GetHighestHdrNonce - +func (hfb *hdrForBlock) GetHighestHdrNonce() map[uint32]uint64 { + m := make(map[uint32]uint64) + + hfb.mutHdrsForBlock.RLock() + for shardId, nonce := range hfb.highestHdrNonce { + m[shardId] = nonce + } + hfb.mutHdrsForBlock.RUnlock() + + return m +} + +// GetMissingHdrs - +func (hfb *hdrForBlock) GetMissingHdrs() uint32 { + hfb.mutHdrsForBlock.RLock() + defer hfb.mutHdrsForBlock.RUnlock() + + return hfb.missingHdrs +} + +// GetMissingFinalityAttestingHdrs - +func (hfb *hdrForBlock) GetMissingFinalityAttestingHdrs() uint32 { + hfb.mutHdrsForBlock.RLock() + defer hfb.mutHdrsForBlock.RUnlock() + + return hfb.missingFinalityAttestingHdrs +} + +// GetHdrHashAndInfo - +func (hfb *hdrForBlock) GetHdrHashAndInfo() map[string]*HdrInfo { + hfb.mutHdrsForBlock.RLock() + defer hfb.mutHdrsForBlock.RUnlock() + + m := make(map[string]*HdrInfo) + for hash, hi := range hfb.hdrHashAndInfo { + m[hash] = &HdrInfo{ + UsedInBlock: hi.usedInBlock, + Hdr: hi.hdr, + } + } + + return m +} diff --git a/process/block/metablock.go b/process/block/metablock.go index 49b2504b3ce..fb53f1207d7 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/headerVersionData" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/dataRetriever" processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" @@ -136,6 +137,8 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { processStatusHandler: arguments.CoreComponents.ProcessStatusHandler(), blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, managedPeersHolder: arguments.ManagedPeersHolder, + sentSignaturesTracker: arguments.SentSignaturesTracker, + extraDelayRequestBlockInfo: time.Duration(arguments.Config.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds) * time.Millisecond, } mp := metaProcessor{ @@ -437,7 +440,7 @@ func (mp *metaProcessor) processEpochStartMetaBlock( } if mp.isRewardsV2Enabled(header) { - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header.Nonce, header.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header) if err != nil { return err } @@ -452,7 +455,7 @@ func (mp *metaProcessor) processEpochStartMetaBlock( return err } - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header.Nonce, header.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header) if err != nil { return err } @@ -869,7 +872,7 @@ func (mp *metaProcessor) createEpochStartBody(metaBlock *block.MetaBlock) (data. var rewardMiniBlocks block.MiniBlockSlice if mp.isRewardsV2Enabled(metaBlock) { - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock.Nonce, metaBlock.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock) if err != nil { return nil, err } @@ -884,7 +887,7 @@ func (mp *metaProcessor) createEpochStartBody(metaBlock *block.MetaBlock) (data. 
return nil, err } - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock.Nonce, metaBlock.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock) if err != nil { return nil, err } @@ -1238,6 +1241,11 @@ func (mp *metaProcessor) CommitBlock( mp.setNonceOfFirstCommittedBlock(headerHandler.GetNonce()) mp.updateLastCommittedInDebugger(headerHandler.GetRound()) + errNotCritical := mp.checkSentSignaturesAtCommitTime(headerHandler) + if errNotCritical != nil { + log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) + } + notarizedHeadersHashes, errNotCritical := mp.updateCrossShardInfo(header) if errNotCritical != nil { log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) @@ -1577,7 +1585,8 @@ func (mp *metaProcessor) commitEpochStart(header *block.MetaBlock, body *block.B // RevertStateToBlock recreates the state tries to the root hashes indicated by the provided root hash and header func (mp *metaProcessor) RevertStateToBlock(header data.HeaderHandler, rootHash []byte) error { - err := mp.accountsDB[state.UserAccountsState].RecreateTrie(rootHash) + rootHashHolder := holders.NewDefaultRootHashesHolder(rootHash) + err := mp.accountsDB[state.UserAccountsState].RecreateTrie(rootHashHolder) if err != nil { log.Debug("recreate trie with error for header", "nonce", header.GetNonce(), diff --git a/process/block/metablockRequest_test.go b/process/block/metablockRequest_test.go new file mode 100644 index 00000000000..0718830a43c --- /dev/null +++ b/process/block/metablockRequest_test.go @@ -0,0 +1,653 @@ +package block_test + +import ( + "bytes" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/multiversx/mx-chain-go/dataRetriever" + blockProcess "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/pool" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" +) + +func TestMetaProcessor_computeExistingAndRequestMissingShardHeaders(t *testing.T) { + t.Parallel() + + noOfShards := uint32(2) + td := createTestData() + + t.Run("all referenced shard headers missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersForBlock := mp.GetHdrForBlock() + 
numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + require.Equal(t, uint32(2), numMissing) + require.Equal(t, uint32(2), headersForBlock.GetMissingHdrs()) + // before receiving all missing headers referenced in metaBlock, the number of missing attestations is not updated + require.Equal(t, uint32(0), numAttestationMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 2) + require.Equal(t, uint32(0), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(2), numCallsMissingHeaders.Load()) + }) + t.Run("one referenced shard header present and one missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing header + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(1), numMissing) + require.Equal(t, uint32(1), headersForBlock.GetMissingHdrs()) + // before receiving all missing headers referenced in metaBlock, the number of missing attestations is not updated + require.Equal(t, uint32(0), numAttestationMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 2) + require.Equal(t, uint32(0), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(1), numCallsMissingHeaders.Load()) + }) + t.Run("all referenced shard headers present, all attestation headers missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing headers + 
headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(0), numMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingHdrs()) + require.Equal(t, uint32(2), numAttestationMissing) + require.Equal(t, uint32(2), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 2) + require.Equal(t, uint32(2), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(0), numCallsMissingHeaders.Load()) + }) + t.Run("all referenced shard headers present, one attestation header missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing headers + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + headersPool.AddHeader(td[0].attestationHeaderData.headerHash, td[0].attestationHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(0), numMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingHdrs()) + require.Equal(t, uint32(1), numAttestationMissing) + require.Equal(t, uint32(1), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 3) + require.Equal(t, uint32(1), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(0), numCallsMissingHeaders.Load()) + }) + t.Run("all referenced shard headers present, all attestation headers present", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := 
blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing headers + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + headersPool.AddHeader(td[0].attestationHeaderData.headerHash, td[0].attestationHeaderData.header) + headersPool.AddHeader(td[1].attestationHeaderData.headerHash, td[1].attestationHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(0), numMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), numAttestationMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 4) + require.Equal(t, uint32(0), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(0), numCallsMissingHeaders.Load()) + }) +} + +func TestMetaProcessor_receivedShardHeader(t *testing.T) { + t.Parallel() + noOfShards := uint32(2) + td := createTestData() + + t.Run("receiving the last used in block shard header", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != attestationNonce { + require.Fail(t, fmt.Sprintf("nonce should have been %d", attestationNonce)) + } + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(1) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(0, td[0].referencedHeaderData.header.GetNonce()-1) + hdrsForBlock.SetHdrHashAndInfo(string(td[0].referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + mp.ReceivedShardHeader(td[0].referencedHeaderData.header, td[0].referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + require.Equal(t, uint32(1), numCalls.Load()) + require.Equal(t, uint32(1), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) + + t.Run("shard header used in block received, not latest", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + // for requesting attestation header + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + require.Equal(t, nonce, attestationNonce, fmt.Sprintf("nonce should have been %d", attestationNonce)) + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + 
referencedHeaderData := td[1].referencedHeaderData + hdrsForBlock.SetHighestHdrNonce(0, referencedHeaderData.header.GetNonce()-1) + hdrsForBlock.SetHdrHashAndInfo(string(referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + mp.ReceivedShardHeader(referencedHeaderData.header, referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + // not yet requested attestation blocks as still missing one header + require.Equal(t, uint32(0), numCalls.Load()) + // not yet computed + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) + t.Run("all needed shard attestation headers received", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + // for requesting attestation header + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != attestationNonce { + require.Fail(t, "nonce should have been %d", attestationNonce) + } + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(1) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + referencedHeaderData := td[0].referencedHeaderData + hdrsForBlock.SetHighestHdrNonce(0, referencedHeaderData.header.GetNonce()-1) + hdrsForBlock.SetHdrHashAndInfo(string(referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + // receive the missing header + headersPool := mp.GetDataPool().Headers() + headersPool.AddHeader(referencedHeaderData.headerHash, referencedHeaderData.header) + mp.ReceivedShardHeader(td[0].referencedHeaderData.header, referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + require.Equal(t, uint32(1), numCalls.Load()) + require.Equal(t, uint32(1), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + + // needs to be done before receiving the last header otherwise it will + // be blocked waiting on writing to the channel + wg := startWaitingForAllHeadersReceivedSignal(t, mp) + + // receive also the attestation header + attestationHeaderData := td[0].attestationHeaderData + headersPool.AddHeader(attestationHeaderData.headerHash, attestationHeaderData.header) + mp.ReceivedShardHeader(attestationHeaderData.header, attestationHeaderData.headerHash) + wg.Wait() + + require.Equal(t, uint32(1), numCalls.Load()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) + t.Run("all needed shard attestation headers received, when multiple shards headers missing", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + requestHandler, ok := 
arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + // for requesting attestation header + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != td[shardID].attestationHeaderData.header.GetNonce() { + require.Fail(t, fmt.Sprintf("requested nonce for shard %d should have been %d", shardID, attestationNonce)) + } + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(0, 99) + hdrsForBlock.SetHighestHdrNonce(1, 97) + hdrsForBlock.SetHdrHashAndInfo(string(td[0].referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + hdrsForBlock.SetHdrHashAndInfo(string(td[1].referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + // receive the missing header for shard 0 + headersPool := mp.GetDataPool().Headers() + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + mp.ReceivedShardHeader(td[0].referencedHeaderData.header, td[0].referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + // the attestation header for shard 0 is not requested as the attestation header for shard 1 is missing + // TODO: refactor request logic to request missing attestation headers as soon as possible + require.Equal(t, uint32(0), numCalls.Load()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + + // receive the missing header for shard 1 + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + mp.ReceivedShardHeader(td[1].referencedHeaderData.header, td[1].referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + require.Equal(t, uint32(2), numCalls.Load()) + require.Equal(t, uint32(2), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + + // needs to be done before receiving the last header otherwise it will + // be blocked writing to a channel no one is reading from + wg := startWaitingForAllHeadersReceivedSignal(t, mp) + + // receive also the attestation header + headersPool.AddHeader(td[0].attestationHeaderData.headerHash, td[0].attestationHeaderData.header) + mp.ReceivedShardHeader(td[0].attestationHeaderData.header, td[0].attestationHeaderData.headerHash) + + headersPool.AddHeader(td[1].attestationHeaderData.headerHash, td[1].attestationHeaderData.header) + mp.ReceivedShardHeader(td[1].attestationHeaderData.header, td[1].attestationHeaderData.headerHash) + wg.Wait() + + time.Sleep(100 * time.Millisecond) + // the receive of an attestation header, if not the last one, will trigger a new request of missing attestation headers + // TODO: refactor request logic to not request recently already requested headers + require.Equal(t, uint32(3), numCalls.Load()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) +} + +type receivedAllHeadersSignaler interface { + ChannelReceiveAllHeaders() chan bool +} + +func startWaitingForAllHeadersReceivedSignal(t *testing.T, mp receivedAllHeadersSignaler) *sync.WaitGroup { + wg := &sync.WaitGroup{} + wg.Add(1) + go func(w *sync.WaitGroup) { + 
receivedAllHeaders := checkReceivedAllHeaders(mp.ChannelReceiveAllHeaders()) + require.True(t, receivedAllHeaders) + wg.Done() + }(wg) + return wg +} + +func checkReceivedAllHeaders(channelReceiveAllHeaders chan bool) bool { + select { + case <-time.After(100 * time.Millisecond): + return false + case <-channelReceiveAllHeaders: + return true + } +} + +func createPoolsHolderForHeaderRequests() dataRetriever.HeadersPool { + headersInPool := make(map[string]data.HeaderHandler) + mutHeadersInPool := sync.RWMutex{} + errNotFound := errors.New("header not found") + + return &pool.HeadersPoolStub{ + AddCalled: func(headerHash []byte, header data.HeaderHandler) { + mutHeadersInPool.Lock() + headersInPool[string(headerHash)] = header + mutHeadersInPool.Unlock() + }, + GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { + mutHeadersInPool.RLock() + defer mutHeadersInPool.RUnlock() + if h, ok := headersInPool[string(hash)]; ok { + return h, nil + } + return nil, errNotFound + }, + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + mutHeadersInPool.RLock() + defer mutHeadersInPool.RUnlock() + for hash, h := range headersInPool { + if h.GetNonce() == hdrNonce && h.GetShardID() == shardId { + return []data.HeaderHandler{h}, [][]byte{[]byte(hash)}, nil + } + } + return nil, nil, errNotFound + }, + } +} + +func createMetaProcessorArguments(t *testing.T, noOfShards uint32) *blockProcess.ArgMetaProcessor { + poolMock := dataRetrieverMock.NewPoolsHolderMock() + poolMock.Headers() + coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() + coreComponents.Hash = &hashingMocks.HasherMock{} + dataComponents.DataPool = poolMock + dataComponents.Storage = initStore() + bootstrapComponents.Coordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil + }, + JournalLenCalled: func() int { + return 0 + }, + } + + startHeaders := createGenesisBlocks(bootstrapComponents.ShardCoordinator()) + arguments.BlockTracker = mock.NewBlockTrackerMock(bootstrapComponents.ShardCoordinator(), startHeaders) + arguments.ArgBaseProcessor.RequestHandler = &testscommon.RequestHandlerStub{ + RequestShardHeaderByNonceCalled: func(shardID uint32, nonce uint64) { + require.Fail(t, "should not have been called") + }, + RequestMetaHeaderByNonceCalled: func(nonce uint64) { + require.Fail(t, "should not have been called") + }, + + RequestShardHeaderCalled: func(shardID uint32, hash []byte) { + require.Fail(t, "should not have been called") + }, + RequestMetaHeaderCalled: func(hash []byte) { + require.Fail(t, "should not have been called") + }, + } + + return &arguments +} + +type shardHeaderData struct { + header *block.HeaderV2 + headerHash []byte +} + +type shardTestData struct { + referencedHeaderData *shardHeaderData + attestationHeaderData *shardHeaderData +} + +func createTestData() map[uint32]*shardTestData { + shard0Header1Hash := []byte("sh0TestHash1") + shard0header2Hash := []byte("sh0TestHash2") + shard1Header1Hash := []byte("sh1TestHash1") + shard1header2Hash := []byte("sh1TestHash2") + shard0ReferencedNonce := uint64(100) + shard1ReferencedNonce := uint64(98) + shard0AttestationNonce := 
shard0ReferencedNonce + 1 + shard1AttestationNonce := shard1ReferencedNonce + 1 + + shardsTestData := map[uint32]*shardTestData{ + 0: { + referencedHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 0, + Round: 100, + Nonce: shard0ReferencedNonce, + }, + }, + headerHash: shard0Header1Hash, + }, + attestationHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 0, + Round: 101, + Nonce: shard0AttestationNonce, + PrevHash: shard0Header1Hash, + }, + }, + headerHash: shard0header2Hash, + }, + }, + 1: { + referencedHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 1, + Round: 100, + Nonce: shard1ReferencedNonce, + }, + }, + headerHash: shard1Header1Hash, + }, + attestationHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 1, + Round: 101, + Nonce: shard1AttestationNonce, + PrevHash: shard1Header1Hash, + }, + }, + headerHash: shard1header2Hash, + }, + }, + } + + return shardsTestData +} + +func createShardInfo(referencedHeaders []*shardHeaderData) []block.ShardData { + shardData := make([]block.ShardData, len(referencedHeaders)) + for i, h := range referencedHeaders { + shardData[i] = block.ShardData{ + HeaderHash: h.headerHash, + Round: h.header.GetRound(), + PrevHash: h.header.GetPrevHash(), + Nonce: h.header.GetNonce(), + ShardID: h.header.GetShardID(), + } + } + + return shardData +} + +func updateRequestsHandlerForCountingRequests( + t *testing.T, + arguments *blockProcess.ArgMetaProcessor, + td map[uint32]*shardTestData, + metaBlock *block.MetaBlock, + numCallsMissingHeaders, numCallsMissingAttestation *atomic.Uint32, +) { + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != attestationNonce { + require.Fail(t, fmt.Sprintf("nonce should have been %d", attestationNonce)) + } + numCallsMissingAttestation.Add(1) + } + requestHandler.RequestShardHeaderCalled = func(shardID uint32, hash []byte) { + for _, sh := range metaBlock.ShardInfo { + if bytes.Equal(sh.HeaderHash, hash) && sh.ShardID == shardID { + numCallsMissingHeaders.Add(1) + return + } + } + + require.Fail(t, fmt.Sprintf("header hash %s not found in meta block", hash)) + } +} diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 0777df9b803..c78f2c5b039 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -150,15 +150,16 @@ func createMockMetaArguments( OutportDataProvider: &outport.OutportDataProviderStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, }, SCToProtocol: &mock.SCToProtocolStub{}, PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, EpochStartDataCreator: &mock.EpochStartDataCreatorStub{}, EpochEconomics: &mock.EpochEconomicsStub{}, - EpochRewardsCreator: &mock.EpochRewardsCreatorStub{}, - EpochValidatorInfoCreator: &mock.EpochValidatorInfoCreatorStub{}, - ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{}, - EpochSystemSCProcessor: &mock.EpochStartSystemSCStub{}, + EpochRewardsCreator: &testscommon.RewardsCreatorStub{}, + EpochValidatorInfoCreator: 
&testscommon.EpochValidatorInfoCreatorStub{}, + ValidatorStatisticsProcessor: &testscommon.ValidatorStatisticsProcessorStub{}, + EpochSystemSCProcessor: &testscommon.EpochStartSystemSCStub{}, } return arguments } @@ -991,6 +992,7 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { mdp := initDataPool([]byte("tx_hash")) rootHash := []byte("rootHash") hdr := createMetaBlockHeader() + hdr.PubKeysBitmap = []byte{0b11111111} body := &block.Body{} accounts := &stateMock.AccountsStub{ CommitCalled: func() (i []byte, e error) { @@ -1042,6 +1044,12 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { return &block.Header{}, []byte("hash"), nil } arguments.BlockTracker = blockTrackerMock + resetCountersForManagedBlockSignerCalled := false + arguments.SentSignaturesTracker = &testscommon.SentSignatureTrackerStub{ + ResetCountersForManagedBlockSignerCalled: func(signerPk []byte) { + resetCountersForManagedBlockSignerCalled = true + }, + } mp, _ := blproc.NewMetaProcessor(arguments) @@ -1083,6 +1091,7 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { assert.Nil(t, err) assert.True(t, forkDetectorAddCalled) assert.True(t, debuggerMethodWasCalled) + assert.True(t, resetCountersForManagedBlockSignerCalled) // this should sleep as there is an async call to display current header and block in CommitBlock time.Sleep(time.Second) } @@ -1195,11 +1204,11 @@ func TestMetaProcessor_RevertStateRevertPeerStateFailsShouldErr(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{} arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RevertPeerStateCalled: func(header data.MetaHeaderHandler) error { return expectedErr }, @@ -1223,12 +1232,12 @@ func TestMetaProcessor_RevertStateShouldWork(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { recreateTrieWasCalled = true return nil }, } - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RevertPeerStateCalled: func(header data.MetaHeaderHandler) error { revertePeerStateWasCalled = true return nil @@ -3002,7 +3011,7 @@ func TestMetaProcessor_CreateAndProcessBlockCallsProcessAfterFirstEpoch(t *testi dataComponents.DataPool = dPool dataComponents.BlockChain = blkc calledSaveNodesCoordinator := false - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ SaveNodesCoordinatorUpdatesCalled: func(epoch uint32) (bool, error) { calledSaveNodesCoordinator = true return true, nil @@ -3010,7 +3019,7 @@ func TestMetaProcessor_CreateAndProcessBlockCallsProcessAfterFirstEpoch(t *testi } toggleCalled := false - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ + 
arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ ToggleUnStakeUnBondCalled: func(value bool) error { toggleCalled = true assert.Equal(t, value, true) @@ -3146,7 +3155,7 @@ func TestMetaProcessor_CreateNewHeaderValsOK(t *testing.T) { func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { t.Parallel() - header := &block.MetaBlock{ + headerMeta := &block.MetaBlock{ Nonce: 1, Round: 1, PrevHash: []byte("hash1"), @@ -3165,19 +3174,18 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { arguments := createMockMetaArguments(coreC, dataC, bootstrapC, statusC) wasCalled := false - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { assert.True(t, wasCalled) return nil }, } - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { - assert.Equal(t, header.GetEpoch(), epoch) - assert.Equal(t, header.GetNonce(), nonce) + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { + assert.Equal(t, headerMeta, header) wasCalled = true return nil }, @@ -3185,7 +3193,7 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { mp, _ := blproc.NewMetaProcessor(arguments) - err := mp.ProcessEpochStartMetaBlock(header, &block.Body{}) + err := mp.ProcessEpochStartMetaBlock(headerMeta, &block.Body{}) assert.Nil(t, err) }) @@ -3202,23 +3210,21 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { }, } arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) - - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{} + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{} wasCalled := false - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { wasCalled = true return nil }, } - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { - assert.Equal(t, header.GetEpoch(), epoch) - assert.Equal(t, header.GetNonce(), nonce) + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorInfos state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { + assert.Equal(t, headerMeta, header) assert.True(t, wasCalled) return nil }, @@ -3226,7 +3232,7 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { mp, _ := blproc.NewMetaProcessor(arguments) - err := mp.ProcessEpochStartMetaBlock(header, &block.Body{}) + err := 
mp.ProcessEpochStartMetaBlock(headerMeta, &block.Body{}) assert.Nil(t, err) }) } @@ -3315,7 +3321,7 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) expectedErr := errors.New("expected error") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() ([]byte, error) { return nil, expectedErr }, @@ -3333,8 +3339,8 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) expectedErr := errors.New("expected error") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { return nil, expectedErr }, } @@ -3351,8 +3357,8 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) expectedErr := errors.New("expected error") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ - ProcessRatingsEndOfEpochCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo, epoch uint32) error { + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ + ProcessRatingsEndOfEpochCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, epoch uint32) error { return expectedErr }, } @@ -3368,15 +3374,13 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { t.Parallel() - expectedValidatorsInfo := map[uint32][]*state.ValidatorInfo{ - 0: { - &state.ValidatorInfo{ - ShardId: 1, - RewardAddress: []byte("rewardAddr1"), - AccumulatedFees: big.NewInt(10), - }, - }, - } + expectedValidatorsInfo := state.NewShardValidatorsInfoMap() + _ = expectedValidatorsInfo.Add( + &state.ValidatorInfo{ + ShardId: 1, + RewardAddress: []byte("rewardAddr1"), + AccumulatedFees: big.NewInt(10), + }) rewardMiniBlocks := block.MiniBlockSlice{ &block.MiniBlock{ @@ -3417,11 +3421,11 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { } expectedRootHash := []byte("root hash") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() ([]byte, error) { return expectedRootHash, nil }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { assert.Equal(t, expectedRootHash, rootHash) return expectedValidatorsInfo, nil @@ -3429,32 +3433,31 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { } wasCalled := false - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { + arguments.EpochSystemSCProcessor = 
&testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { wasCalled = true - assert.Equal(t, mb.GetNonce(), nonce) - assert.Equal(t, mb.GetEpoch(), epoch) + assert.Equal(t, mb, header) return nil }, } expectedRewardsForProtocolSustain := big.NewInt(11) - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { assert.Equal(t, expectedValidatorsInfo, validatorsInfo) assert.Equal(t, mb, metaBlock) assert.True(t, wasCalled) return rewardMiniBlocks, nil }, - GetProtocolSustainCalled: func() *big.Int { + GetProtocolSustainabilityRewardsCalled: func() *big.Int { return expectedRewardsForProtocolSustain }, } - arguments.EpochValidatorInfoCreator = &mock.EpochValidatorInfoCreatorStub{ - CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { + arguments.EpochValidatorInfoCreator = &testscommon.EpochValidatorInfoCreatorStub{ + CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { assert.Equal(t, expectedValidatorsInfo, validatorsInfo) return validatorInfoMiniBlocks, nil }, @@ -3497,11 +3500,11 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { } expectedRootHash := []byte("root hash") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() ([]byte, error) { return expectedRootHash, nil }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { assert.Equal(t, expectedRootHash, rootHash) return expectedValidatorsInfo, nil }, @@ -3509,32 +3512,31 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { wasCalled := false expectedRewardsForProtocolSustain := big.NewInt(11) - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { wasCalled = true assert.Equal(t, expectedValidatorsInfo, validatorsInfo) assert.Equal(t, mb, metaBlock) return rewardMiniBlocks, nil }, - GetProtocolSustainCalled: func() *big.Int { + GetProtocolSustainabilityRewardsCalled: func() *big.Int { return expectedRewardsForProtocolSustain }, } - arguments.EpochValidatorInfoCreator = &mock.EpochValidatorInfoCreatorStub{ - CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { + arguments.EpochValidatorInfoCreator = &testscommon.EpochValidatorInfoCreatorStub{ + CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo 
state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { assert.Equal(t, expectedValidatorsInfo, validatorsInfo) return validatorInfoMiniBlocks, nil }, } - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { assert.True(t, wasCalled) - assert.Equal(t, mb.GetNonce(), nonce) - assert.Equal(t, mb.GetEpoch(), epoch) + assert.Equal(t, mb, header) return nil }, } @@ -3609,8 +3611,7 @@ func TestMetaProcessor_getAllMarshalledTxs(t *testing.T) { t.Parallel() arguments := createMockMetaArguments(createMockComponentHolders()) - - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ CreateMarshalledDataCalled: func(body *block.Body) map[string][][]byte { marshalledData := make(map[string][][]byte) for _, miniBlock := range body.MiniBlocks { @@ -3623,7 +3624,7 @@ func TestMetaProcessor_getAllMarshalledTxs(t *testing.T) { }, } - arguments.EpochValidatorInfoCreator = &mock.EpochValidatorInfoCreatorStub{ + arguments.EpochValidatorInfoCreator = &testscommon.EpochValidatorInfoCreatorStub{ CreateMarshalledDataCalled: func(body *block.Body) map[string][][]byte { marshalledData := make(map[string][][]byte) for _, miniBlock := range body.MiniBlocks { diff --git a/process/block/metrics.go b/process/block/metrics.go index f9c3e0075b3..ce29ddb23f8 100644 --- a/process/block/metrics.go +++ b/process/block/metrics.go @@ -225,12 +225,12 @@ func indexValidatorsRating( return } - for shardID, validatorInfosInShard := range validators { + for shardID, validatorInfosInShard := range validators.GetShardValidatorsInfoMap() { validatorsInfos := make([]*outportcore.ValidatorRatingInfo, 0) for _, validatorInfo := range validatorInfosInShard { validatorsInfos = append(validatorsInfos, &outportcore.ValidatorRatingInfo{ - PublicKey: hex.EncodeToString(validatorInfo.PublicKey), - Rating: float32(validatorInfo.Rating) * 100 / 10000000, + PublicKey: hex.EncodeToString(validatorInfo.GetPublicKey()), + Rating: float32(validatorInfo.GetRating()) * 100 / 10000000, }) } diff --git a/process/block/postprocess/basePostProcess.go b/process/block/postprocess/basePostProcess.go index 058118dd88b..26473387dd7 100644 --- a/process/block/postprocess/basePostProcess.go +++ b/process/block/postprocess/basePostProcess.go @@ -9,10 +9,11 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-logger-go" ) var _ process.DataMarshalizer = (*basePostProcessor)(nil) @@ -28,6 +29,14 @@ type txInfo struct { *txShardInfo } +type processedResult struct { + parentKey []byte + childrenKeys map[string]struct{} + results [][]byte +} + +const defaultCapacity = 100 + var log = logger.GetOrCreate("process/block/postprocess") type basePostProcessor struct { @@ -39,7 +48,7 @@ type 
basePostProcessor struct { mutInterResultsForBlock sync.Mutex interResultsForBlock map[string]*txInfo - mapProcessedResult map[string][][]byte + mapProcessedResult map[string]*processedResult intraShardMiniBlock *block.MiniBlock economicsFee process.FeeHandler index uint32 @@ -78,7 +87,7 @@ func (bpp *basePostProcessor) CreateBlockStarted() { bpp.mutInterResultsForBlock.Lock() bpp.interResultsForBlock = make(map[string]*txInfo) bpp.intraShardMiniBlock = nil - bpp.mapProcessedResult = make(map[string][][]byte) + bpp.mapProcessedResult = make(map[string]*processedResult) bpp.index = 0 bpp.mutInterResultsForBlock.Unlock() } @@ -170,24 +179,72 @@ func (bpp *basePostProcessor) RemoveProcessedResults(key []byte) [][]byte { bpp.mutInterResultsForBlock.Lock() defer bpp.mutInterResultsForBlock.Unlock() - txHashes, ok := bpp.mapProcessedResult[string(key)] + removedProcessedResults, ok := bpp.removeProcessedResultsAndLinks(string(key)) if !ok { return nil } - for _, txHash := range txHashes { - delete(bpp.interResultsForBlock, string(txHash)) + for _, result := range removedProcessedResults { + delete(bpp.interResultsForBlock, string(result)) } - return txHashes + return removedProcessedResults +} + +func (bpp *basePostProcessor) removeProcessedResultsAndLinks(key string) ([][]byte, bool) { + processedResults, ok := bpp.mapProcessedResult[key] + if !ok { + return nil, ok + } + delete(bpp.mapProcessedResult, key) + + collectedProcessedResultsKeys := make([][]byte, 0, defaultCapacity) + collectedProcessedResultsKeys = append(collectedProcessedResultsKeys, processedResults.results...) + + // go through the childrenKeys and do the same + for childKey := range processedResults.childrenKeys { + childProcessedResults, ok := bpp.removeProcessedResultsAndLinks(childKey) + if !ok { + continue + } + + collectedProcessedResultsKeys = append(collectedProcessedResultsKeys, childProcessedResults...) 
+ } + + // remove link from parentKey + parent, ok := bpp.mapProcessedResult[string(processedResults.parentKey)] + if ok { + delete(parent.childrenKeys, key) + } + + return collectedProcessedResultsKeys, true } // InitProcessedResults will initialize the processed results -func (bpp *basePostProcessor) InitProcessedResults(key []byte) { +func (bpp *basePostProcessor) InitProcessedResults(key []byte, parentKey []byte) { bpp.mutInterResultsForBlock.Lock() defer bpp.mutInterResultsForBlock.Unlock() - bpp.mapProcessedResult[string(key)] = make([][]byte, 0) + pr := &processedResult{ + parentKey: parentKey, + childrenKeys: make(map[string]struct{}), + results: make([][]byte, 0), + } + + bpp.mapProcessedResult[string(key)] = pr + + if len(parentKey) > 0 { + parentPr, ok := bpp.mapProcessedResult[string(parentKey)] + if !ok { + bpp.mapProcessedResult[string(parentKey)] = &processedResult{ + parentKey: nil, + childrenKeys: map[string]struct{}{string(key): {}}, + results: make([][]byte, 0), + } + } else { + parentPr.childrenKeys[string(key)] = struct{}{} + } + } } func (bpp *basePostProcessor) splitMiniBlocksIfNeeded(miniBlocks []*block.MiniBlock) []*block.MiniBlock { @@ -275,13 +332,17 @@ func (bpp *basePostProcessor) addIntermediateTxToResultsForBlock( txHash []byte, sndShardID uint32, rcvShardID uint32, + key []byte, ) { addScrShardInfo := &txShardInfo{receiverShardID: rcvShardID, senderShardID: sndShardID} scrInfo := &txInfo{tx: txHandler, txShardInfo: addScrShardInfo, index: bpp.index} bpp.index++ bpp.interResultsForBlock[string(txHash)] = scrInfo - for key := range bpp.mapProcessedResult { - bpp.mapProcessedResult[key] = append(bpp.mapProcessedResult[key], txHash) + pr, ok := bpp.mapProcessedResult[string(key)] + if !ok { + return } + + pr.results = append(pr.results, txHash) } diff --git a/process/block/postprocess/basePostProcess_test.go b/process/block/postprocess/basePostProcess_test.go new file mode 100644 index 00000000000..021ef491840 --- /dev/null +++ b/process/block/postprocess/basePostProcess_test.go @@ -0,0 +1,171 @@ +package postprocess + +import ( + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/hashing/sha256" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/stretchr/testify/require" +) + +func createBaseProcessors(numProcessors int) []*basePostProcessor { + basePostProcessors := make([]*basePostProcessor, numProcessors) + for i := 0; i < numProcessors; i++ { + basePostProcessors[i] = &basePostProcessor{ + hasher: sha256.NewSha256(), + marshalizer: &marshal.GogoProtoMarshalizer{}, + store: nil, + shardCoordinator: nil, + storageType: 0, + mutInterResultsForBlock: sync.Mutex{}, + interResultsForBlock: make(map[string]*txInfo), + mapProcessedResult: make(map[string]*processedResult), + intraShardMiniBlock: nil, + economicsFee: nil, + index: 0, + } + } + return basePostProcessors +} + +func createTxs(hasher hashing.Hasher, num int) ([]data.TransactionHandler, [][]byte) { + txs := make([]data.TransactionHandler, num) + txHashes := make([][]byte, num) + marshaller := &marshal.GogoProtoMarshalizer{} + + for i := 0; i < num; i++ { + txs[i] = &transaction.Transaction{ + Nonce: uint64(i), + } + marshalledTx, _ := 
marshaller.Marshal(txs[i]) + txHashes[i] = hasher.Compute(string(marshalledTx)) + } + + return txs, txHashes +} + +func createScrs(hasher hashing.Hasher, num int) ([]data.TransactionHandler, [][]byte) { + scrs := make([]data.TransactionHandler, num) + scrHashes := make([][]byte, num) + marshaller := &marshal.GogoProtoMarshalizer{} + + for i := 0; i < num; i++ { + scrs[i] = &smartContractResult.SmartContractResult{ + Nonce: uint64(i), + OriginalTxHash: []byte("original tx hash"), + } + marshalledTx, _ := marshaller.Marshal(scrs[i]) + scrHashes[i] = hasher.Compute(string(marshalledTx)) + } + + return scrs, scrHashes +} + +func TestBasePostProcessor_InitAddAndRemove(t *testing.T) { + numInstances := 1 + numTxs := 10000 + numScrs := 10000 + headerHash := []byte("headerHash") + miniBlockHash := []byte("miniBlockHash") + _, txHashes := createTxs(sha256.NewSha256(), numTxs) + scrs, scrHash := createScrs(sha256.NewSha256(), numScrs) + + t.Run("InitProcessedResults for header, miniblock and txs, remove one tx, then miniblock", func(t *testing.T) { + basePreProcs := createBaseProcessors(numInstances) + basePreProcs[0].InitProcessedResults(headerHash, nil) + basePreProcs[0].InitProcessedResults(miniBlockHash, headerHash) + require.Len(t, basePreProcs[0].mapProcessedResult[string(headerHash)].childrenKeys, 1) + require.Len(t, basePreProcs[0].mapProcessedResult[string(headerHash)].results, 0) + + for i := 0; i < numTxs; i++ { + basePreProcs[0].InitProcessedResults(txHashes[i], miniBlockHash) + basePreProcs[0].addIntermediateTxToResultsForBlock(scrs[i], scrHash[i], 0, 1, txHashes[i]) + } + require.Equal(t, numTxs, len(basePreProcs[0].mapProcessedResult[string(miniBlockHash)].childrenKeys)) + require.Len(t, basePreProcs[0].mapProcessedResult[string(miniBlockHash)].results, 0) + + for i := 0; i < numTxs; i++ { + require.Len(t, basePreProcs[0].mapProcessedResult[string(txHashes[i])].results, 1) + require.Len(t, basePreProcs[0].mapProcessedResult[string(txHashes[i])].childrenKeys, 0) + } + + results := basePreProcs[0].RemoveProcessedResults(txHashes[0]) + require.Len(t, results, 1) + + // miniBlockHash has numTxs-1 childrenKeys, as one was removed + // each child has one scr registered + results = basePreProcs[0].RemoveProcessedResults(miniBlockHash) + require.Len(t, results, numTxs-1) + + // headerHash no longer has childrenKeys and no direct results, so removing it should return an empty slice + results = basePreProcs[0].RemoveProcessedResults(headerHash) + require.Len(t, results, 0) + }) + t.Run("InitProcessedResults for header, miniblock and txs, remove directly the miniblock", func(t *testing.T) { + basePreProcs := createBaseProcessors(numInstances) + basePreProcs[0].InitProcessedResults(headerHash, nil) + basePreProcs[0].InitProcessedResults(miniBlockHash, headerHash) + require.Len(t, basePreProcs[0].mapProcessedResult[string(headerHash)].childrenKeys, 1) + require.Len(t, basePreProcs[0].mapProcessedResult[string(headerHash)].results, 0) + + for i := 0; i < numTxs; i++ { + basePreProcs[0].InitProcessedResults(txHashes[i], miniBlockHash) + basePreProcs[0].addIntermediateTxToResultsForBlock(scrs[i], scrHash[i], 0, 1, txHashes[i]) + } + require.Equal(t, numTxs, len(basePreProcs[0].mapProcessedResult[string(miniBlockHash)].childrenKeys)) + require.Len(t, basePreProcs[0].mapProcessedResult[string(miniBlockHash)].results, 0) + + for i := 0; i < numTxs; i++ { + require.Len(t, basePreProcs[0].mapProcessedResult[string(txHashes[i])].results, 1) + require.Len(t, 
basePreProcs[0].mapProcessedResult[string(txHashes[i])].childrenKeys, 0) + } + + // miniBlockHash has numTxs childrenKeys, each child has one scr registered + // removing directly the miniBlock should return numTxs results (the scrs) + results := basePreProcs[0].RemoveProcessedResults(miniBlockHash) + require.Len(t, results, numTxs) + for i := 0; i < numTxs; i++ { + require.Nil(t, basePreProcs[0].mapProcessedResult[string(txHashes[i])]) + } + + // headerHash no longer has childrenKeys and no direct results, so removing it should return an empty slice + results = basePreProcs[0].RemoveProcessedResults(headerHash) + require.Len(t, results, 0) + }) + + t.Run("InitProcessedResults for header, miniblock and txs, remove directly the headerhash", func(t *testing.T) { + basePreProcs := createBaseProcessors(numInstances) + basePreProcs[0].InitProcessedResults(headerHash, nil) + basePreProcs[0].InitProcessedResults(miniBlockHash, headerHash) + require.Len(t, basePreProcs[0].mapProcessedResult[string(headerHash)].childrenKeys, 1) + require.Len(t, basePreProcs[0].mapProcessedResult[string(headerHash)].results, 0) + + for i := 0; i < numTxs; i++ { + basePreProcs[0].InitProcessedResults(txHashes[i], miniBlockHash) + basePreProcs[0].addIntermediateTxToResultsForBlock(scrs[i], scrHash[i], 0, 1, txHashes[i]) + } + require.Equal(t, numTxs, len(basePreProcs[0].mapProcessedResult[string(miniBlockHash)].childrenKeys)) + require.Len(t, basePreProcs[0].mapProcessedResult[string(miniBlockHash)].results, 0) + + for i := 0; i < numTxs; i++ { + require.Len(t, basePreProcs[0].mapProcessedResult[string(txHashes[i])].results, 1) + require.Len(t, basePreProcs[0].mapProcessedResult[string(txHashes[i])].childrenKeys, 0) + } + + // headerHash has one child, miniBlockHash + // miniBlockHash has numTxs childrenKeys, each child has one scr registered + // removing directly the headerHash should return numTxs results (the scrs) for the removed chained childrenKeys + results := basePreProcs[0].RemoveProcessedResults(headerHash) + require.Len(t, results, numTxs) + require.Nil(t, basePreProcs[0].mapProcessedResult[string(miniBlockHash)]) + + for i := 0; i < numTxs; i++ { + require.Nil(t, basePreProcs[0].mapProcessedResult[string(txHashes[i])]) + } + }) +} diff --git a/process/block/postprocess/feeHandler.go b/process/block/postprocess/feeHandler.go index d4248154ef9..5cfc7996ab6 100644 --- a/process/block/postprocess/feeHandler.go +++ b/process/block/postprocess/feeHandler.go @@ -26,13 +26,14 @@ type feeHandler struct { } // NewFeeAccumulator constructor for the fee accumulator -func NewFeeAccumulator() (*feeHandler, error) { - f := &feeHandler{} - f.accumulatedFees = big.NewInt(0) - f.developerFees = big.NewInt(0) - f.mapHashFee = make(map[string]*feeData) - f.mapDependentHashes = make(map[string][]byte) - return f, nil +func NewFeeAccumulator() *feeHandler { + return &feeHandler{ + mut: sync.RWMutex{}, + mapHashFee: make(map[string]*feeData), + accumulatedFees: big.NewInt(0), + developerFees: big.NewInt(0), + mapDependentHashes: make(map[string][]byte), + } } // CreateBlockStarted does the cleanup before creating a new block diff --git a/process/block/postprocess/feeHandler_test.go b/process/block/postprocess/feeHandler_test.go index b74dbab4e0e..060276ba2fb 100644 --- a/process/block/postprocess/feeHandler_test.go +++ b/process/block/postprocess/feeHandler_test.go @@ -13,15 +13,14 @@ import ( func TestNewFeeAccumulator(t *testing.T) { t.Parallel() - feeHandler, err := postprocess.NewFeeAccumulator() - require.Nil(t, err) + 
feeHandler := postprocess.NewFeeAccumulator() require.NotNil(t, feeHandler) } func TestFeeHandler_CreateBlockStarted(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(50), []byte("txhash")) zeroGasAndFees := process.GetZeroGasAndFees() @@ -37,7 +36,7 @@ func TestFeeHandler_CreateBlockStarted(t *testing.T) { func TestFeeHandler_GetAccumulatedFees(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(50), []byte("txhash")) accumulatedFees := feeHandler.GetAccumulatedFees() @@ -47,7 +46,7 @@ func TestFeeHandler_GetAccumulatedFees(t *testing.T) { func TestFeeHandler_GetDeveloperFees(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(50), []byte("txhash")) devFees := feeHandler.GetDeveloperFees() @@ -57,7 +56,7 @@ func TestFeeHandler_GetDeveloperFees(t *testing.T) { func TestFeeHandler_ProcessTransactionFee(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(1000), big.NewInt(100), []byte("txhash1")) feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(10), []byte("txhash2")) @@ -72,7 +71,7 @@ func TestFeeHandler_ProcessTransactionFee(t *testing.T) { func TestFeeHandler_RevertFees(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(1000), big.NewInt(100), []byte("txhash1")) feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(10), []byte("txhash2")) @@ -89,7 +88,7 @@ func TestFeeHandler_RevertFees(t *testing.T) { func TestFeeHandler_CompleteRevertFeesUserTxs(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() userTxHashes := [][]byte{[]byte("txHash1"), []byte("txHash2"), []byte("txHash3")} originalTxHashes := [][]byte{[]byte("origTxHash1"), []byte("origTxHash2"), []byte("origTxHash3")} @@ -111,7 +110,7 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { originalTxHashes := [][]byte{[]byte("origTxHash1"), []byte("origTxHash2"), []byte("origTxHash3"), []byte("userTxHash4")} t.Run("revert partial originalTxs", func(t *testing.T) { - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(1000), big.NewInt(100), userTxHashes[0], originalTxHashes[0]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(100), big.NewInt(10), userTxHashes[1], originalTxHashes[1]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(10), big.NewInt(1), userTxHashes[2], originalTxHashes[2]) @@ -125,7 +124,7 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { require.Equal(t, big.NewInt(200), devFees) }) t.Run("revert all userTxs", func(t *testing.T) { - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(1000), big.NewInt(100), userTxHashes[0], originalTxHashes[0]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(100), big.NewInt(10), userTxHashes[1], 
originalTxHashes[1]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(10), big.NewInt(1), userTxHashes[2], originalTxHashes[2]) @@ -139,7 +138,7 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { require.Equal(t, big.NewInt(200), devFees) }) t.Run("revert partial userTxs", func(t *testing.T) { - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(1000), big.NewInt(100), userTxHashes[0], originalTxHashes[0]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(100), big.NewInt(10), userTxHashes[1], originalTxHashes[1]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(10), big.NewInt(1), userTxHashes[2], originalTxHashes[2]) @@ -157,6 +156,6 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { func TestFeeHandler_IsInterfaceNil(t *testing.T) { t.Parallel() - fee, _ := postprocess.NewFeeAccumulator() + fee := postprocess.NewFeeAccumulator() require.False(t, check.IfNil(fee)) } diff --git a/process/block/postprocess/intermediateResults.go b/process/block/postprocess/intermediateResults.go index b10b99a03f8..d706e83623e 100644 --- a/process/block/postprocess/intermediateResults.go +++ b/process/block/postprocess/intermediateResults.go @@ -12,11 +12,12 @@ import ( "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" - logger "github.com/multiversx/mx-chain-logger-go" ) var _ process.IntermediateTransactionHandler = (*intermediateResultsProcessor)(nil) @@ -89,7 +90,7 @@ func NewIntermediateResultsProcessor( shardCoordinator: args.Coordinator, store: args.Store, storageType: dataRetriever.UnsignedTransactionUnit, - mapProcessedResult: make(map[string][][]byte), + mapProcessedResult: make(map[string]*processedResult), economicsFee: args.EconomicsFee, } @@ -237,7 +238,7 @@ func (irp *intermediateResultsProcessor) VerifyInterMiniBlocks(body *block.Body) } // AddIntermediateTransactions adds smart contract results from smart contract processing for cross-shard calls -func (irp *intermediateResultsProcessor) AddIntermediateTransactions(txs []data.TransactionHandler) error { +func (irp *intermediateResultsProcessor) AddIntermediateTransactions(txs []data.TransactionHandler, key []byte) error { irp.mutInterResultsForBlock.Lock() defer irp.mutInterResultsForBlock.Unlock() @@ -261,7 +262,7 @@ func (irp *intermediateResultsProcessor) AddIntermediateTransactions(txs []data. } if log.GetLevel() == logger.LogTrace { - //spew.Sdump is very useful when debugging errors like `receipts hash mismatch` + // spew.Sdump is very useful when debugging errors like `receipts hash mismatch` log.Trace("scr added", "txHash", addScr.PrevTxHash, "hash", scrHash, "nonce", addScr.Nonce, "gasLimit", addScr.GasLimit, "value", addScr.Value, "dump", spew.Sdump(addScr)) } @@ -269,7 +270,7 @@ func (irp *intermediateResultsProcessor) AddIntermediateTransactions(txs []data. 
sndShId, dstShId := irp.getShardIdsFromAddresses(addScr.SndAddr, addScr.RcvAddr) irp.executionOrderHandler.Add(scrHash) - irp.addIntermediateTxToResultsForBlock(addScr, scrHash, sndShId, dstShId) + irp.addIntermediateTxToResultsForBlock(addScr, scrHash, sndShId, dstShId, key) } return nil diff --git a/process/block/postprocess/intermediateResults_test.go b/process/block/postprocess/intermediateResults_test.go index d659730575a..9ef1f6d0358 100644 --- a/process/block/postprocess/intermediateResults_test.go +++ b/process/block/postprocess/intermediateResults_test.go @@ -13,6 +13,9 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" @@ -23,8 +26,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/storage" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const maxGasLimitPerBlock = uint64(1500000000) @@ -35,15 +36,15 @@ func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { func createMockArgsNewIntermediateResultsProcessor() ArgsNewIntermediateResultsProcessor { args := ArgsNewIntermediateResultsProcessor{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - Coordinator: mock.NewMultiShardsCoordinatorMock(5), - PubkeyConv: createMockPubkeyConverter(), - Store: &storage.ChainStorerStub{}, - BlockType: block.SmartContractResultBlock, - CurrTxs: &mock.TxForCurrentBlockStub{}, - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + Coordinator: mock.NewMultiShardsCoordinatorMock(5), + PubkeyConv: createMockPubkeyConverter(), + Store: &storage.ChainStorerStub{}, + BlockType: block.SmartContractResultBlock, + CurrTxs: &mock.TxForCurrentBlockStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } @@ -198,7 +199,7 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactions(t *testing.T) assert.NotNil(t, irp) assert.Nil(t, err) - err = irp.AddIntermediateTransactions(nil) + err = irp.AddIntermediateTransactions(nil, nil) assert.Nil(t, err) } @@ -216,7 +217,7 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsWrongType(t *te txs := make([]data.TransactionHandler, 0) txs = append(txs, &transaction.Transaction{}) - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Equal(t, process.ErrWrongTypeAssertion, err) } @@ -242,7 +243,7 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsNilSender(t *te shardC.ComputeIdCalled = func(address []byte) uint32 { return 
shardC.SelfId() } - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Equal(t, process.ErrNilSndAddr, err) } @@ -268,7 +269,7 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsNilReceiver(t * shardC.ComputeIdCalled = func(address []byte) uint32 { return shardC.SelfId() } - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Equal(t, process.ErrNilRcvAddr, err) } @@ -303,7 +304,7 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsShardIdMismatch txs = append(txs, scr) txs = append(txs, scr) - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Equal(t, process.ErrShardIdMissmatch, err) } @@ -329,14 +330,14 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsNegativeValueIn shardC.ComputeIdCalled = func(address []byte) uint32 { return shardC.SelfId() } - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Nil(t, err) shardC.ComputeIdCalled = func(address []byte) uint32 { return shardC.SelfId() + 1 } - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Equal(t, process.ErrNegativeValue, err) } @@ -364,7 +365,7 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsAddrGood(t *tes txs = append(txs, scr) txs = append(txs, scr) - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Nil(t, err) } @@ -394,25 +395,26 @@ func TestIntermediateResultsProcessor_AddIntermediateTransactionsAddAndRevert(t txs = append(txs, &smartContractResult.SmartContractResult{RcvAddr: []byte("rcv"), SndAddr: []byte("snd"), Value: big.NewInt(0), PrevTxHash: txHash, Nonce: 3}) txs = append(txs, &smartContractResult.SmartContractResult{RcvAddr: []byte("rcv"), SndAddr: []byte("snd"), Value: big.NewInt(0), PrevTxHash: txHash, Nonce: 4}) + parentKey := []byte("parentKey") key := []byte("key") - irp.InitProcessedResults(key) + irp.InitProcessedResults(key, parentKey) - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, key) assert.Nil(t, err) irp.mutInterResultsForBlock.Lock() - assert.Equal(t, len(irp.mapProcessedResult[string(key)]), len(txs)) + assert.Equal(t, len(irp.mapProcessedResult[string(key)].results), len(txs)) assert.Equal(t, len(txs), calledCount) irp.mutInterResultsForBlock.Unlock() irp.RemoveProcessedResults(key) irp.mutInterResultsForBlock.Lock() assert.Equal(t, len(irp.interResultsForBlock), 0) - assert.Equal(t, len(irp.mapProcessedResult[string(key)]), len(txs)) + require.Nil(t, irp.mapProcessedResult[string(key)]) irp.mutInterResultsForBlock.Unlock() - irp.InitProcessedResults(key) + irp.InitProcessedResults(key, parentKey) irp.mutInterResultsForBlock.Lock() - assert.Equal(t, len(irp.mapProcessedResult[string(key)]), 0) + assert.Equal(t, len(irp.mapProcessedResult[string(key)].results), 0) irp.mutInterResultsForBlock.Unlock() } @@ -460,7 +462,7 @@ func TestIntermediateResultsProcessor_CreateAllInterMiniBlocksNotCrossShard(t *t txs = append(txs, scr) txs = append(txs, scr) - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Nil(t, err) mbs := irp.CreateAllInterMiniBlocks() @@ -500,7 +502,7 @@ func TestIntermediateResultsProcessor_CreateAllInterMiniBlocksCrossShard(t *test txs = append(txs, &smartContractResult.SmartContractResult{SndAddr: snd, RcvAddr: []byte("recvaddr4"), 
Value: big.NewInt(0), PrevTxHash: []byte("txHash")}) txs = append(txs, &smartContractResult.SmartContractResult{SndAddr: snd, RcvAddr: []byte("recvaddr5"), Value: big.NewInt(0), PrevTxHash: []byte("txHash")}) - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Nil(t, err) mbs := irp.CreateAllInterMiniBlocks() @@ -545,7 +547,7 @@ func TestIntermediateResultsProcessor_GetNumOfCrossInterMbsAndTxsShouldWork(t *t txs = append(txs, &smartContractResult.SmartContractResult{Nonce: 8, SndAddr: snd, RcvAddr: []byte("3"), Value: big.NewInt(0), PrevTxHash: []byte("txHash")}) txs = append(txs, &smartContractResult.SmartContractResult{Nonce: 9, SndAddr: snd, RcvAddr: []byte("3"), Value: big.NewInt(0), PrevTxHash: []byte("txHash")}) - _ = irp.AddIntermediateTransactions(txs) + _ = irp.AddIntermediateTransactions(txs, nil) numMbs, numTxs := irp.GetNumOfCrossInterMbsAndTxs() assert.Equal(t, 3, numMbs) @@ -644,7 +646,7 @@ func TestIntermediateResultsProcessor_VerifyInterMiniBlocksBodyMiniBlockMissmatc txs = append(txs, &smartContractResult.SmartContractResult{SndAddr: snd, RcvAddr: []byte("recvaddr4"), Value: big.NewInt(0), PrevTxHash: []byte("txHash")}) txs = append(txs, &smartContractResult.SmartContractResult{SndAddr: snd, RcvAddr: []byte("recvaddr5"), Value: big.NewInt(0), PrevTxHash: []byte("txHash")}) - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Nil(t, err) err = irp.VerifyInterMiniBlocks(body) @@ -689,7 +691,7 @@ func TestIntermediateResultsProcessor_VerifyInterMiniBlocksBodyShouldPass(t *tes txs = append(txs, &smartContractResult.SmartContractResult{SndAddr: snd, RcvAddr: []byte("recvaddr4"), Value: big.NewInt(0), PrevTxHash: []byte("txHash")}) txs = append(txs, &smartContractResult.SmartContractResult{SndAddr: snd, RcvAddr: []byte("recvaddr5"), Value: big.NewInt(0), PrevTxHash: []byte("txHash")}) - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Nil(t, err) miniBlock := &block.MiniBlock{ @@ -763,7 +765,7 @@ func TestIntermediateResultsProcessor_SaveCurrentIntermediateTxToStorageShouldSa txs = append(txs, &smartContractResult.SmartContractResult{SndAddr: snd, RcvAddr: []byte("recvaddr4"), Value: big.NewInt(0), PrevTxHash: []byte("txHash")}) txs = append(txs, &smartContractResult.SmartContractResult{SndAddr: snd, RcvAddr: []byte("recvaddr5"), Value: big.NewInt(0), PrevTxHash: []byte("txHash")}) - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Nil(t, err) irp.SaveCurrentIntermediateTxToStorage() @@ -843,7 +845,7 @@ func TestIntermediateResultsProcessor_CreateMarshalizedData(t *testing.T) { currHash, _ = core.CalculateHash(marshalizer, hasher, txs[4]) txHashes = append(txHashes, currHash) - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Nil(t, err) mrsTxs, err := irp.CreateMarshalledData(txHashes) @@ -889,7 +891,7 @@ func TestIntermediateResultsProcessor_GetAllCurrentUsedTxs(t *testing.T) { txs = append(txs, &smartContractResult.SmartContractResult{SndAddr: snd, RcvAddr: snd, Nonce: 1, Value: big.NewInt(0), PrevTxHash: []byte("txHash")}) txs = append(txs, &smartContractResult.SmartContractResult{SndAddr: snd, RcvAddr: snd, Nonce: 2, Value: big.NewInt(0), PrevTxHash: []byte("txHash")}) - err = irp.AddIntermediateTransactions(txs) + err = irp.AddIntermediateTransactions(txs, nil) assert.Nil(t, err) usedTxs := irp.GetAllCurrentFinishedTxs() @@ 
-958,13 +960,13 @@ func TestIntermediateResultsProcessor_addIntermediateTxToResultsForBlock(t *test irp, _ := NewIntermediateResultsProcessor(createMockArgsNewIntermediateResultsProcessor()) key := []byte("key") - irp.InitProcessedResults(key) + irp.InitProcessedResults(key, nil) tx := &transaction.Transaction{} txHash := []byte("txHash") sndShardID := uint32(1) rcvShardID := uint32(2) - irp.addIntermediateTxToResultsForBlock(tx, txHash, sndShardID, rcvShardID) + irp.addIntermediateTxToResultsForBlock(tx, txHash, sndShardID, rcvShardID, key) require.Equal(t, 1, len(irp.interResultsForBlock)) require.Equal(t, 1, len(irp.mapProcessedResult)) @@ -977,6 +979,6 @@ func TestIntermediateResultsProcessor_addIntermediateTxToResultsForBlock(t *test intermediateResultsHashes, ok := irp.mapProcessedResult[string(key)] require.True(t, ok) - require.Equal(t, 1, len(intermediateResultsHashes)) - assert.Equal(t, txHash, intermediateResultsHashes[0]) + require.Equal(t, 1, len(intermediateResultsHashes.results)) + assert.Equal(t, txHash, intermediateResultsHashes.results[0]) } diff --git a/process/block/postprocess/oneMBPostProcessor.go b/process/block/postprocess/oneMBPostProcessor.go index 5c68c3b194b..6a87e32d6f4 100644 --- a/process/block/postprocess/oneMBPostProcessor.go +++ b/process/block/postprocess/oneMBPostProcessor.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" @@ -54,7 +55,7 @@ func NewOneMiniBlockPostProcessor( shardCoordinator: coordinator, store: store, storageType: storageType, - mapProcessedResult: make(map[string][][]byte), + mapProcessedResult: make(map[string]*processedResult), economicsFee: economicsFee, } @@ -143,7 +144,7 @@ func (opp *oneMBPostProcessor) VerifyInterMiniBlocks(body *block.Body) error { } // AddIntermediateTransactions adds receipts/bad transactions resulting from transaction processor -func (opp *oneMBPostProcessor) AddIntermediateTransactions(txs []data.TransactionHandler) error { +func (opp *oneMBPostProcessor) AddIntermediateTransactions(txs []data.TransactionHandler, key []byte) error { opp.mutInterResultsForBlock.Lock() defer opp.mutInterResultsForBlock.Unlock() @@ -155,7 +156,7 @@ func (opp *oneMBPostProcessor) AddIntermediateTransactions(txs []data.Transactio return err } - opp.addIntermediateTxToResultsForBlock(txs[i], txHash, selfId, selfId) + opp.addIntermediateTxToResultsForBlock(txs[i], txHash, selfId, selfId, key) } return nil diff --git a/process/block/postprocess/oneMBPostProcessor_test.go b/process/block/postprocess/oneMBPostProcessor_test.go index 5151fdc5f88..236f457198e 100644 --- a/process/block/postprocess/oneMBPostProcessor_test.go +++ b/process/block/postprocess/oneMBPostProcessor_test.go @@ -9,13 +9,14 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/stretchr/testify/assert" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" 
"github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/storage" - "github.com/stretchr/testify/assert" ) func TestNewOneMBPostProcessor_NilHasher(t *testing.T) { @@ -154,7 +155,9 @@ func TestOneMBPostProcessor_CreateAllInterMiniBlocksOneMinBlock(t *testing.T) { txs = append(txs, &transaction.Transaction{}) txs = append(txs, &transaction.Transaction{}) - err := irp.AddIntermediateTransactions(txs) + // with no InitProcessedResults, means that the transactions are added as scheduled transactions, not as + // processing results from the execution of other transactions or miniblocks + err := irp.AddIntermediateTransactions(txs, nil) assert.Nil(t, err) mbs := irp.CreateAllInterMiniBlocks() @@ -198,7 +201,7 @@ func TestOneMBPostProcessor_VerifyTooManyBlock(t *testing.T) { txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr4")}) txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr5")}) - err := irp.AddIntermediateTransactions(txs) + err := irp.AddIntermediateTransactions(txs, nil) assert.Nil(t, err) miniBlock := &block.MiniBlock{ @@ -267,7 +270,7 @@ func TestOneMBPostProcessor_VerifyOk(t *testing.T) { txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr4")}) txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr5")}) - err := irp.AddIntermediateTransactions(txs) + err := irp.AddIntermediateTransactions(txs, nil) assert.Nil(t, err) miniBlock := &block.MiniBlock{ diff --git a/process/block/preprocess/basePreProcess.go b/process/block/preprocess/basePreProcess.go index 58534fe4395..56ea615559e 100644 --- a/process/block/preprocess/basePreProcess.go +++ b/process/block/preprocess/basePreProcess.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" @@ -497,9 +498,9 @@ func (bpp *basePreProcess) updateGasConsumedWithGasRefundedAndGasPenalized( gasInfo.totalGasConsumedInSelfShard -= gasToBeSubtracted } -func (bpp *basePreProcess) handleProcessTransactionInit(preProcessorExecutionInfoHandler process.PreProcessorExecutionInfoHandler, txHash []byte) int { +func (bpp *basePreProcess) handleProcessTransactionInit(preProcessorExecutionInfoHandler process.PreProcessorExecutionInfoHandler, txHash []byte, mbHash []byte) int { snapshot := bpp.accounts.JournalLen() - preProcessorExecutionInfoHandler.InitProcessedTxsResults(txHash) + preProcessorExecutionInfoHandler.InitProcessedTxsResults(txHash, mbHash) return snapshot } diff --git a/process/block/preprocess/basePreProcess_test.go b/process/block/preprocess/basePreProcess_test.go index fc17684ca08..221f69c28db 100644 --- a/process/block/preprocess/basePreProcess_test.go +++ b/process/block/preprocess/basePreProcess_test.go @@ -4,22 +4,26 @@ import ( "bytes" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/testscommon" 
"github.com/multiversx/mx-chain-go/testscommon/state" - "github.com/stretchr/testify/assert" ) func TestBasePreProcess_handleProcessTransactionInit(t *testing.T) { t.Parallel() + mbHash := []byte("mb hash") txHash := []byte("tx hash") initProcessedTxsCalled := false preProcessorExecutionInfoHandler := &testscommon.PreProcessorExecutionInfoHandlerMock{ - InitProcessedTxsResultsCalled: func(key []byte) { + InitProcessedTxsResultsCalled: func(key []byte, parentKey []byte) { if !bytes.Equal(key, txHash) { return } + require.Equal(t, mbHash, parentKey) initProcessedTxsCalled = true }, @@ -41,7 +45,7 @@ func TestBasePreProcess_handleProcessTransactionInit(t *testing.T) { }, } - recoveredJournalLen := bp.handleProcessTransactionInit(preProcessorExecutionInfoHandler, txHash) + recoveredJournalLen := bp.handleProcessTransactionInit(preProcessorExecutionInfoHandler, txHash, mbHash) assert.Equal(t, journalLen, recoveredJournalLen) assert.True(t, initProcessedTxsCalled) } diff --git a/process/block/preprocess/gasComputation.go b/process/block/preprocess/gasComputation.go index 628c6de455f..9888917b7f3 100644 --- a/process/block/preprocess/gasComputation.go +++ b/process/block/preprocess/gasComputation.go @@ -426,7 +426,7 @@ func (gc *gasComputation) computeGasProvidedByTxV1( } func (gc *gasComputation) isRelayedTx(txType process.TransactionType) bool { - return txType == process.RelayedTx || txType == process.RelayedTxV2 + return txType == process.RelayedTx || txType == process.RelayedTxV2 || txType == process.RelayedTxV3 } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index d80d8ffbb4c..e695d51e498 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/rewardTx" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" @@ -494,6 +495,11 @@ func (rtp *rewardTxPreprocessor) ProcessMiniBlock( return nil, indexOfLastTxProcessed, false, process.ErrMaxBlockSizeReached } + miniBlockHash, err := core.CalculateHash(rtp.marshalizer, rtp.hasher, miniBlock) + if err != nil { + return nil, indexOfLastTxProcessed, false, err + } + processedTxHashes := make([][]byte, 0) for txIndex = indexOfFirstTxToBeProcessed; txIndex < len(miniBlockRewardTxs); txIndex++ { if !haveTime() { @@ -506,7 +512,7 @@ func (rtp *rewardTxPreprocessor) ProcessMiniBlock( break } - snapshot := rtp.handleProcessTransactionInit(preProcessorExecutionInfoHandler, miniBlockTxHashes[txIndex]) + snapshot := rtp.handleProcessTransactionInit(preProcessorExecutionInfoHandler, miniBlockTxHashes[txIndex], miniBlockHash) rtp.txExecutionOrderHandler.Add(miniBlockTxHashes[txIndex]) err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[txIndex]) diff --git a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index 471c94360bd..3ac910a1834 100644 --- a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -11,6 +11,7 @@ import ( 
"github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" @@ -571,6 +572,11 @@ func (scr *smartContractResults) ProcessMiniBlock( return nil, indexOfLastTxProcessed, false, process.ErrMaxBlockSizeReached } + miniBlockHash, err := core.CalculateHash(scr.marshalizer, scr.hasher, miniBlock) + if err != nil { + return nil, indexOfLastTxProcessed, false, err + } + gasInfo := gasConsumedInfo{ gasConsumedByMiniBlockInReceiverShard: uint64(0), gasConsumedByMiniBlocksInSenderShard: uint64(0), @@ -633,7 +639,7 @@ func (scr *smartContractResults) ProcessMiniBlock( break } - snapshot := scr.handleProcessTransactionInit(preProcessorExecutionInfoHandler, miniBlockTxHashes[txIndex]) + snapshot := scr.handleProcessTransactionInit(preProcessorExecutionInfoHandler, miniBlockTxHashes[txIndex], miniBlockHash) scr.txExecutionOrderHandler.Add(miniBlockTxHashes[txIndex]) _, err = scr.scrProcessor.ProcessSmartContractResult(miniBlockScrs[txIndex]) diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index fd53f95aad5..eb24585a55b 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -15,6 +15,9 @@ import ( "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + logger "github.com/multiversx/mx-chain-logger-go" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" @@ -23,8 +26,6 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/txcache" - logger "github.com/multiversx/mx-chain-logger-go" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) var _ process.DataMarshalizer = (*transactions)(nil) @@ -1508,6 +1509,11 @@ func (txs *transactions) ProcessMiniBlock( return nil, indexOfLastTxProcessed, false, process.ErrMaxBlockSizeReached } + miniBlockHash, err := core.CalculateHash(txs.marshalizer, txs.hasher, miniBlock) + if err != nil { + return nil, indexOfLastTxProcessed, false, err + } + var totalGasConsumed uint64 if scheduledMode { totalGasConsumed = txs.gasHandler.TotalGasProvidedAsScheduled() @@ -1587,7 +1593,8 @@ func (txs *transactions) ProcessMiniBlock( miniBlockTxs[txIndex], miniBlockTxHashes[txIndex], &gasInfo, - gasProvidedByTxInSelfShard) + gasProvidedByTxInSelfShard, + miniBlockHash) if err != nil { break } @@ -1646,9 +1653,10 @@ func (txs *transactions) processInNormalMode( txHash []byte, gasInfo *gasConsumedInfo, gasProvidedByTxInSelfShard uint64, + mbHash []byte, ) error { - snapshot := txs.handleProcessTransactionInit(preProcessorExecutionInfoHandler, txHash) + snapshot := txs.handleProcessTransactionInit(preProcessorExecutionInfoHandler, txHash, mbHash) txs.txExecutionOrderHandler.Add(txHash) _, err := 
txs.txProcessor.ProcessTransaction(tx) diff --git a/process/block/preprocess/transactionsV2_test.go b/process/block/preprocess/transactionsV2_test.go index 50203a1a5ae..9d4fb1cf686 100644 --- a/process/block/preprocess/transactionsV2_test.go +++ b/process/block/preprocess/transactionsV2_test.go @@ -15,9 +15,9 @@ import ( "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/testscommon" + commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" - commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 91e2d79d29f..a73b4e0be6d 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/headerVersionData" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/dataRetriever" processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" @@ -121,6 +122,8 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { processStatusHandler: arguments.CoreComponents.ProcessStatusHandler(), blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, managedPeersHolder: arguments.ManagedPeersHolder, + sentSignaturesTracker: arguments.SentSignaturesTracker, + extraDelayRequestBlockInfo: time.Duration(arguments.Config.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds) * time.Millisecond, } sp := shardProcessor{ @@ -401,8 +404,8 @@ func (sp *shardProcessor) requestEpochStartInfo(header data.ShardHeaderHandler, // RevertStateToBlock recreates the state tries to the root hashes indicated by the provided root hash and header func (sp *shardProcessor) RevertStateToBlock(header data.HeaderHandler, rootHash []byte) error { - - err := sp.accountsDB[state.UserAccountsState].RecreateTrie(rootHash) + rootHashHolder := holders.NewDefaultRootHashesHolder(rootHash) + err := sp.accountsDB[state.UserAccountsState].RecreateTrie(rootHashHolder) if err != nil { log.Debug("recreate trie with error for header", "nonce", header.GetNonce(), @@ -988,7 +991,12 @@ func (sp *shardProcessor) CommitBlock( sp.updateLastCommittedInDebugger(headerHandler.GetRound()) - errNotCritical := sp.updateCrossShardInfo(processedMetaHdrs) + errNotCritical := sp.checkSentSignaturesAtCommitTime(headerHandler) + if errNotCritical != nil { + log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) + } + + errNotCritical = sp.updateCrossShardInfo(processedMetaHdrs) if errNotCritical != nil { log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) } diff --git a/process/block/shardblockRequest_test.go 
b/process/block/shardblockRequest_test.go new file mode 100644 index 00000000000..2440c6ecba5 --- /dev/null +++ b/process/block/shardblockRequest_test.go @@ -0,0 +1,584 @@ +package block_test + +import ( + "bytes" + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/stretchr/testify/require" + + blproc "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" +) + +type headerData struct { + hash []byte + header data.HeaderHandler +} + +type shardBlockTestData struct { + headerData []*headerData +} + +func TestShardProcessor_RequestMissingFinalityAttestingHeaders(t *testing.T) { + t.Parallel() + + t.Run("missing attesting meta header", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + metaChainData := testData[core.MetachainShardId] + numCalls := atomic.Uint32{} + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + attestationNonce := metaChainData.headerData[1].header.GetNonce() + require.Equal(t, attestationNonce, nonce, fmt.Sprintf("nonce should have been %d", attestationNonce)) + numCalls.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + + metaBlockData := metaChainData.headerData[0] + // not adding the confirmation metaBlock to the headers pool means it will be missing and requested + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + res := sp.RequestMissingFinalityAttestingHeaders() + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), res) + require.Equal(t, uint32(1), numCalls.Load()) + }) + t.Run("no missing attesting meta header", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + metaChainData := testData[core.MetachainShardId] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "should not request meta header by nonce") + } + sp, _ := blproc.NewShardProcessor(arguments) + + headersDataPool := arguments.DataComponents.Datapool().Headers() + require.NotNil(t, headersDataPool) + metaBlockData := metaChainData.headerData[0] + confirmationMetaBlockData := metaChainData.headerData[1] + headersDataPool.AddHeader(confirmationMetaBlockData.hash, confirmationMetaBlockData.header) + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + res := sp.RequestMissingFinalityAttestingHeaders() + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(0), res) + }) +} + +func TestShardProcessor_computeExistingAndRequestMissingMetaHeaders(t *testing.T) { + t.Parallel() + + shard1ID := uint32(1) + t.Run("one referenced metaBlock missing will be 
requested", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + metaChainData := testData[core.MetachainShardId] + shard1Data := testData[shard1ID] + numCalls := atomic.Uint32{} + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + // should only be called when requesting attestation meta header block + require.Fail(t, "should not request meta header by nonce") + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Equal(t, metaChainData.headerData[1].hash, hash) + numCalls.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + + metaBlockData := metaChainData.headerData[0] + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + // not adding the referenced metaBlock to the headers pool means it will be missing and requested + // first of the 2 referenced headers is added, the other will be missing + headersDataPool := arguments.DataComponents.Datapool().Headers() + headersDataPool.AddHeader(metaBlockData.hash, metaBlockData.header) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), missingHeaders) + require.Equal(t, uint32(0), missingFinalityAttestingHeaders) + require.Equal(t, uint32(1), numCalls.Load()) + }) + t.Run("multiple referenced metaBlocks missing will be requested", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + numCalls := atomic.Uint32{} + metaChainData := testData[core.MetachainShardId] + shard1Data := testData[shard1ID] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + // not yet requesting the attestation metaBlock + require.Fail(t, "should not request meta header by nonce") + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + if !(bytes.Equal(hash, metaChainData.headerData[0].hash) || bytes.Equal(hash, metaChainData.headerData[1].hash)) { + require.Fail(t, "other requests than the expected 2 metaBlocks are not expected") + } + + numCalls.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + metaBlockData := testData[core.MetachainShardId].headerData[0] + // not adding the referenced metaBlock to the headers pool means it will be missing and requested + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(2), missingHeaders) + require.Equal(t, uint32(0), missingFinalityAttestingHeaders) + require.Equal(t, uint32(2), numCalls.Load()) + }) 
+ t.Run("all referenced metaBlocks existing with missing attestation, will request the attestation metaBlock", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + numCallsMissing := atomic.Uint32{} + numCallsAttestation := atomic.Uint32{} + metaChainData := testData[core.MetachainShardId] + shard1Data := testData[shard1ID] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + // not yet requesting the attestation metaBlock + require.Equal(t, metaChainData.headerData[1].header.GetNonce()+1, nonce) + numCallsAttestation.Add(1) + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + if !(bytes.Equal(hash, metaChainData.headerData[0].hash) || bytes.Equal(hash, metaChainData.headerData[1].hash)) { + require.Fail(t, "other requests than the expected 2 metaBlocks are not expected") + } + + numCallsMissing.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + // not adding the referenced metaBlock to the headers pool means it will be missing and requested + headersDataPool := arguments.DataComponents.Datapool().Headers() + headersDataPool.AddHeader(metaChainData.headerData[0].hash, metaChainData.headerData[0].header) + headersDataPool.AddHeader(metaChainData.headerData[1].hash, metaChainData.headerData[1].header) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(0), missingHeaders) + require.Equal(t, uint32(1), missingFinalityAttestingHeaders) + require.Equal(t, uint32(0), numCallsMissing.Load()) + require.Equal(t, uint32(1), numCallsAttestation.Load()) + }) + t.Run("all referenced metaBlocks existing and existing attestation metaBlock will not request", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + numCallsMissing := atomic.Uint32{} + numCallsAttestation := atomic.Uint32{} + shard1Data := testData[shard1ID] + metaChainData := testData[core.MetachainShardId] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + numCallsAttestation.Add(1) + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + numCallsMissing.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + // not adding the referenced metaBlock to the headers pool means it will be missing and requested + headersDataPool := arguments.DataComponents.Datapool().Headers() + headersDataPool.AddHeader(metaChainData.headerData[0].hash, metaChainData.headerData[0].header) + headersDataPool.AddHeader(metaChainData.headerData[1].hash, metaChainData.headerData[1].header) + attestationMetaBlock := &block.MetaBlock{ + Nonce: 102, + Round: 102, + PrevHash: metaChainData.headerData[1].hash, + ShardInfo: []block.ShardData{}, + } + attestationMetaBlockHash := []byte("attestationHash") + + 
headersDataPool.AddHeader(attestationMetaBlockHash, attestationMetaBlock) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(0), missingHeaders) + require.Equal(t, uint32(0), missingFinalityAttestingHeaders) + require.Equal(t, uint32(0), numCallsMissing.Load()) + require.Equal(t, uint32(0), numCallsAttestation.Load()) + }) +} + +func TestShardProcessor_receivedMetaBlock(t *testing.T) { + t.Parallel() + + t.Run("received non referenced metaBlock, while still having missing referenced metaBlocks", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + firstMissingMetaBlockData := testData[core.MetachainShardId].headerData[0] + secondMissingMetaBlockData := testData[core.MetachainShardId].headerData[1] + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + highestHeaderNonce := firstMissingMetaBlockData.header.GetNonce() - 1 + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, highestHeaderNonce) + hdrsForBlock.SetHdrHashAndInfo(string(firstMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + hdrsForBlock.SetHdrHashAndInfo(string(secondMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + otherMetaBlock := &block.MetaBlock{ + Nonce: 102, + Round: 102, + PrevHash: []byte("other meta block prev hash"), + } + + otherMetaBlockHash := []byte("other meta block hash") + sp.ReceivedMetaBlock(otherMetaBlock, otherMetaBlockHash) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(2), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + highestHeaderNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, highestHeaderNonce, highestHeaderNonces[core.MetachainShardId]) + }) + t.Run("received missing referenced metaBlock, other referenced metaBlock still missing", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + firstMissingMetaBlockData := testData[core.MetachainShardId].headerData[0] + secondMissingMetaBlockData := testData[core.MetachainShardId].headerData[1] + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + highestHeaderNonce := firstMissingMetaBlockData.header.GetNonce() - 1 + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, highestHeaderNonce) + hdrsForBlock.SetHdrHashAndInfo(string(firstMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + 
hdrsForBlock.SetHdrHashAndInfo(string(secondMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + sp.ReceivedMetaBlock(firstMissingMetaBlockData.header, firstMissingMetaBlockData.hash) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + highestHeaderNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, firstMissingMetaBlockData.header.GetNonce(), highestHeaderNonces[core.MetachainShardId]) + }) + t.Run("received non missing referenced metaBlock", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + notMissingReferencedMetaBlockData := testData[core.MetachainShardId].headerData[0] + missingMetaBlockData := testData[core.MetachainShardId].headerData[1] + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + highestHeaderNonce := notMissingReferencedMetaBlockData.header.GetNonce() - 1 + hdrsForBlock.SetNumMissingHdrs(1) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, highestHeaderNonce) + hdrsForBlock.SetHdrHashAndInfo(string(notMissingReferencedMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: notMissingReferencedMetaBlockData.header, + }) + hdrsForBlock.SetHdrHashAndInfo(string(missingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + headersDataPool := arguments.DataComponents.Datapool().Headers() + require.NotNil(t, headersDataPool) + headersDataPool.AddHeader(notMissingReferencedMetaBlockData.hash, notMissingReferencedMetaBlockData.header) + + sp.ReceivedMetaBlock(notMissingReferencedMetaBlockData.header, notMissingReferencedMetaBlockData.hash) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + hdrsForBlockHighestNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, highestHeaderNonce, hdrsForBlockHighestNonces[core.MetachainShardId]) + }) + t.Run("received missing attestation metaBlock", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + referencedMetaBlock := testData[core.MetachainShardId].headerData[0] + lastReferencedMetaBlock := testData[core.MetachainShardId].headerData[1] + attestationMetaBlockHash := []byte("attestation meta block hash") + attestationMetaBlock := &block.MetaBlock{ + Nonce: lastReferencedMetaBlock.header.GetNonce() + 1, + Round: lastReferencedMetaBlock.header.GetRound() + 1, + PrevHash: lastReferencedMetaBlock.hash, + } + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + hdrsForBlock.SetNumMissingHdrs(0) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(1) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, 
lastReferencedMetaBlock.header.GetNonce()) + hdrsForBlock.SetHdrHashAndInfo(string(referencedMetaBlock.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: referencedMetaBlock.header, + }) + hdrsForBlock.SetHdrHashAndInfo(string(lastReferencedMetaBlock.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: lastReferencedMetaBlock.header, + }) + + headersDataPool := arguments.DataComponents.Datapool().Headers() + require.NotNil(t, headersDataPool) + headersDataPool.AddHeader(referencedMetaBlock.hash, referencedMetaBlock.header) + headersDataPool.AddHeader(lastReferencedMetaBlock.hash, lastReferencedMetaBlock.header) + headersDataPool.AddHeader(attestationMetaBlockHash, attestationMetaBlock) + wg := startWaitingForAllHeadersReceivedSignal(t, sp) + + sp.ReceivedMetaBlock(attestationMetaBlock, attestationMetaBlockHash) + wg.Wait() + + require.Equal(t, uint32(0), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + hdrsForBlockHighestNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, lastReferencedMetaBlock.header.GetNonce(), hdrsForBlockHighestNonces[core.MetachainShardId]) + }) +} + +func shardBlockRequestTestInit(t *testing.T) (blproc.ArgShardProcessor, *testscommon.RequestHandlerStub) { + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + poolMock := dataRetrieverMock.NewPoolsHolderMock() + dataComponents.DataPool = poolMock + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + poolsHolderAsInterface := arguments.DataComponents.Datapool() + poolsHolder, ok := poolsHolderAsInterface.(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + return arguments, requestHandler +} + +func createShardProcessorTestData() map[uint32]*shardBlockTestData { + // shard 0 miniblocks + mbHash1 := []byte("mb hash 1") + mbHash2 := []byte("mb hash 2") + mbHash3 := []byte("mb hash 3") + + // shard 1 miniblocks + mbHash4 := []byte("mb hash 4") + mbHash5 := []byte("mb hash 5") + mbHash6 := []byte("mb hash 6") + + prevMetaBlockHash := []byte("prev meta block hash") + metaBlockHash := []byte("meta block hash") + metaConfirmationHash := []byte("confirmation meta block hash") + + shard0Block0Hash := []byte("shard 0 block 0 hash") + shard0Block1Hash := []byte("shard 0 block 1 hash") + shard0Block2Hash := []byte("shard 0 block 2 hash") + + shard1Block0Hash := []byte("shard 1 block 0 hash") + shard1Block1Hash := []byte("shard 1 block 1 hash") + shard1Block2Hash := []byte("shard 1 block 2 hash") + + metaBlock := &block.MetaBlock{ + Nonce: 100, + Round: 100, + PrevHash: prevMetaBlockHash, + ShardInfo: []block.ShardData{ + { + ShardID: 0, + HeaderHash: shard0Block1Hash, + PrevHash: shard0Block0Hash, + ShardMiniBlockHeaders: []block.MiniBlockHeader{ + {Hash: mbHash1, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash2, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash3, SenderShardID: 0, ReceiverShardID: 1}, + }, + }, + }, + } + metaConfirmationBlock := &block.MetaBlock{ + Nonce: 101, + Round: 101, + PrevHash: metaBlockHash, + ShardInfo: []block.ShardData{}, + } + + shard0Block1 := &block.Header{ + ShardID: 0, + PrevHash: shard0Block0Hash, + Nonce: 98, + Round: 98, + MiniBlockHeaders: []block.MiniBlockHeader{ + 
{Hash: mbHash1, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash2, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash3, SenderShardID: 0, ReceiverShardID: 1}, + }, + } + + shard0Block2 := &block.Header{ + ShardID: 0, + PrevHash: shard0Block1Hash, + Nonce: 99, + Round: 99, + MiniBlockHeaders: []block.MiniBlockHeader{}, + } + + shard1Block1 := &block.Header{ + ShardID: 1, + PrevHash: shard1Block0Hash, + MetaBlockHashes: [][]byte{prevMetaBlockHash}, + Nonce: 102, + Round: 102, + MiniBlockHeaders: []block.MiniBlockHeader{ + {Hash: mbHash4, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash5, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash6, SenderShardID: 0, ReceiverShardID: 1}, + }, + } + + shard1Block2 := &block.Header{ + ShardID: 1, + PrevHash: shard1Block1Hash, + MetaBlockHashes: [][]byte{metaBlockHash, metaConfirmationHash}, + Nonce: 103, + Round: 103, + MiniBlockHeaders: []block.MiniBlockHeader{}, + } + + sbd := map[uint32]*shardBlockTestData{ + 0: { + headerData: []*headerData{ + { + hash: shard0Block1Hash, + header: shard0Block1, + }, + { + hash: shard0Block2Hash, + header: shard0Block2, + }, + }, + }, + 1: { + headerData: []*headerData{ + { + hash: shard1Block1Hash, + header: shard1Block1, + }, + { + hash: shard1Block2Hash, + header: shard1Block2, + }, + }, + }, + core.MetachainShardId: { + headerData: []*headerData{ + { + hash: metaBlockHash, + header: metaBlock, + }, + { + hash: metaConfirmationHash, + header: metaConfirmationBlock, + }, + }, + }, + } + + return sbd +} diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 4b9b95a8c56..39797f8db0c 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -22,6 +22,10 @@ import ( "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" @@ -45,9 +49,6 @@ import ( stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const MaxGasLimitPerBlock = uint64(100000) @@ -1677,21 +1678,6 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. 
assert.Equal(t, err, process.ErrTimeIsOut) } -// -------- requestMissingFinalityAttestingHeaders -func TestShardProcessor_RequestMissingFinalityAttestingHeaders(t *testing.T) { - t.Parallel() - - tdp := dataRetrieverMock.NewPoolsHolderMock() - coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - dataComponents.DataPool = tdp - arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) - sp, _ := blproc.NewShardProcessor(arguments) - - sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, 1) - res := sp.RequestMissingFinalityAttestingHeaders() - assert.Equal(t, res > 0, true) -} - // --------- verifyIncludedMetaBlocksFinality func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing.T) { t.Parallel() @@ -2048,7 +2034,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { hdr := &block.Header{ Nonce: 1, Round: 1, - PubKeysBitmap: rootHash, + PubKeysBitmap: []byte{0b11111111}, PrevHash: hdrHash, Signature: rootHash, RootHash: rootHash, @@ -2121,6 +2107,12 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { return &block.MetaBlock{}, []byte("hash"), nil } arguments.BlockTracker = blockTrackerMock + resetCountersForManagedBlockSignerCalled := false + arguments.SentSignaturesTracker = &testscommon.SentSignatureTrackerStub{ + ResetCountersForManagedBlockSignerCalled: func(signerPk []byte) { + resetCountersForManagedBlockSignerCalled = true + }, + } sp, _ := blproc.NewShardProcessor(arguments) debuggerMethodWasCalled := false @@ -2144,6 +2136,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { assert.True(t, forkDetectorAddCalled) assert.Equal(t, hdrHash, blkc.GetCurrentBlockHeaderHash()) assert.True(t, debuggerMethodWasCalled) + assert.True(t, resetCountersForManagedBlockSignerCalled) // this should sleep as there is an async call to display current hdr and block in CommitBlock time.Sleep(time.Second) } diff --git a/process/common.go b/process/common.go index f06e0d00091..e8c9c7504ff 100644 --- a/process/common.go +++ b/process/common.go @@ -680,6 +680,10 @@ func DisplayProcessTxDetails( txHash []byte, addressPubkeyConverter core.PubkeyConverter, ) { + if log.GetLevel() > logger.LogTrace { + return + } + if !check.IfNil(accountHandler) { account, ok := accountHandler.(state.UserAccountHandler) if ok { diff --git a/process/constants.go b/process/constants.go index f75e7b882ee..44101f50b7c 100644 --- a/process/constants.go +++ b/process/constants.go @@ -36,6 +36,8 @@ const ( RelayedTx // RelayedTxV2 defines the ID of a slim relayed transaction version RelayedTxV2 + // RelayedTxV3 defines the ID of a relayed v3 transaction + RelayedTxV3 // RewardTx defines ID of a reward transaction RewardTx // InvalidTransaction defines unknown transaction type @@ -56,6 +58,8 @@ func (transactionType TransactionType) String() string { return "RelayedTx" case RelayedTxV2: return "RelayedTxV2" + case RelayedTxV3: + return "RelayedTxV3" case RewardTx: return "RewardTx" case InvalidTransaction: diff --git a/process/coordinator/process.go b/process/coordinator/process.go index fad1906ef00..8a50d9f0b21 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -15,6 +15,8 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + logger 
"github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/preprocess" @@ -24,7 +26,6 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/cache" - logger "github.com/multiversx/mx-chain-logger-go" ) var _ process.TransactionCoordinator = (*transactionCoordinator)(nil) @@ -703,7 +704,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe oldIndexOfLastTxProcessed := processedMbInfo.IndexOfLastTxProcessed - errProc := tc.processCompleteMiniBlock(preproc, miniBlock, miniBlockInfo.Hash, haveTime, haveAdditionalTime, scheduledMode, processedMbInfo) + errProc := tc.processCompleteMiniBlock(preproc, miniBlock, miniBlockInfo.Hash, haveTime, haveAdditionalTime, scheduledMode, processedMbInfo, headerHash) tc.handleProcessMiniBlockExecution(oldIndexOfLastTxProcessed, miniBlock, processedMbInfo, createMBDestMeExecutionInfo) if errProc != nil { shouldSkipShard[miniBlockInfo.SenderShardID] = true @@ -810,7 +811,7 @@ func (tc *transactionCoordinator) handleCreateMiniBlocksDestMeInit(headerHash [] return } - tc.InitProcessedTxsResults(headerHash) + tc.InitProcessedTxsResults(headerHash, nil) tc.gasHandler.Reset(headerHash) } @@ -1190,9 +1191,10 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( haveAdditionalTime func() bool, scheduledMode bool, processedMbInfo *processedMb.ProcessedMiniBlockInfo, + headerHash []byte, ) error { - snapshot := tc.handleProcessMiniBlockInit(miniBlockHash) + snapshot := tc.handleProcessMiniBlockInit(miniBlockHash, headerHash) log.Debug("transactionsCoordinator.processCompleteMiniBlock: before processing", "scheduled mode", scheduledMode, @@ -1259,9 +1261,9 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( return nil } -func (tc *transactionCoordinator) handleProcessMiniBlockInit(miniBlockHash []byte) int { +func (tc *transactionCoordinator) handleProcessMiniBlockInit(miniBlockHash []byte, headerHash []byte) int { snapshot := tc.accounts.JournalLen() - tc.InitProcessedTxsResults(miniBlockHash) + tc.InitProcessedTxsResults(miniBlockHash, headerHash) tc.gasHandler.Reset(miniBlockHash) return snapshot @@ -1282,7 +1284,7 @@ func (tc *transactionCoordinator) handleProcessTransactionError(snapshot int, mi } // InitProcessedTxsResults inits processed txs results for the given key -func (tc *transactionCoordinator) InitProcessedTxsResults(key []byte) { +func (tc *transactionCoordinator) InitProcessedTxsResults(key []byte, parentKey []byte) { tc.mutInterimProcessors.RLock() defer tc.mutInterimProcessors.RUnlock() @@ -1291,7 +1293,7 @@ func (tc *transactionCoordinator) InitProcessedTxsResults(key []byte) { if !ok { continue } - interProc.InitProcessedResults(key) + interProc.InitProcessedResults(key, parentKey) } } @@ -1831,14 +1833,14 @@ func checkTransactionCoordinatorNilParameters(arguments ArgTransactionCoordinato } // AddIntermediateTransactions adds the given intermediate transactions -func (tc *transactionCoordinator) AddIntermediateTransactions(mapSCRs map[block.Type][]data.TransactionHandler) error { +func (tc *transactionCoordinator) AddIntermediateTransactions(mapSCRs map[block.Type][]data.TransactionHandler, key []byte) error { for blockType, scrs := range mapSCRs { interimProc 
:= tc.getInterimProcessor(blockType) if check.IfNil(interimProc) { return process.ErrNilIntermediateProcessor } - err := interimProc.AddIntermediateTransactions(scrs) + err := interimProc.AddIntermediateTransactions(scrs, key) if err != nil { return err } diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 0508620283e..d1dff667cb7 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -21,6 +21,10 @@ import ( "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" @@ -40,9 +44,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const MaxGasLimitPerBlock = uint64(100000) @@ -566,14 +567,14 @@ func createPreProcessorContainer() process.PreProcessorsContainer { func createInterimProcessorContainer() process.IntermediateProcessorContainer { argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - PubkeyConverter: createMockPubkeyConverter(), - Store: initStore(), - PoolsHolder: initDataPool([]byte("test_hash1")), - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + PubkeyConverter: createMockPubkeyConverter(), + Store: initStore(), + PoolsHolder: initDataPool([]byte("test_hash1")), + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } preFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -751,24 +752,25 @@ func TestTransactionCoordinator_CreateMarshalizedDataWithTxsAndScr(t *testing.T) scrs := make([]data.TransactionHandler, 0) body := &block.Body{} body.MiniBlocks = append(body.MiniBlocks, createMiniBlockWithOneTx(0, 1, block.TxBlock, txHash)) + genericTxHash := []byte("txHash") - scr := &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(99), PrevTxHash: []byte("txHash")} + scr := &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(99), PrevTxHash: genericTxHash} scrHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &hashingMocks.HasherMock{}, scr) scrs = 
append(scrs, scr) body.MiniBlocks = append(body.MiniBlocks, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) - scr = &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(199), PrevTxHash: []byte("txHash")} + scr = &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(199), PrevTxHash: genericTxHash} scrHash, _ = core.CalculateHash(&mock.MarshalizerMock{}, &hashingMocks.HasherMock{}, scr) scrs = append(scrs, scr) body.MiniBlocks = append(body.MiniBlocks, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) - scr = &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(299), PrevTxHash: []byte("txHash")} + scr = &smartContractResult.SmartContractResult{SndAddr: []byte("snd"), RcvAddr: []byte("rcv"), Value: big.NewInt(299), PrevTxHash: genericTxHash} scrHash, _ = core.CalculateHash(&mock.MarshalizerMock{}, &hashingMocks.HasherMock{}, scr) scrs = append(scrs, scr) body.MiniBlocks = append(body.MiniBlocks, createMiniBlockWithOneTx(0, 1, block.SmartContractResultBlock, scrHash)) scrInterimProc, _ := interimContainer.Get(block.SmartContractResultBlock) - _ = scrInterimProc.AddIntermediateTransactions(scrs) + _ = scrInterimProc.AddIntermediateTransactions(scrs, genericTxHash) mrTxs := tc.CreateMarshalizedData(body) assert.Equal(t, 1, len(mrTxs)) @@ -1019,6 +1021,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsWithSki } func TestTransactionCoordinator_HandleProcessMiniBlockInit(t *testing.T) { + headerHash := []byte("header hash") mbHash := []byte("miniblock hash") numResetGasHandler := 0 numInitInterimProc := 0 @@ -1034,7 +1037,8 @@ func TestTransactionCoordinator_HandleProcessMiniBlockInit(t *testing.T) { keysInterimProcs: []block.Type{block.SmartContractResultBlock}, interimProcessors: map[block.Type]process.IntermediateTransactionHandler{ block.SmartContractResultBlock: &mock.IntermediateTransactionHandlerStub{ - InitProcessedResultsCalled: func(key []byte) { + InitProcessedResultsCalled: func(key []byte, parentKey []byte) { + assert.Equal(t, headerHash, parentKey) assert.Equal(t, mbHash, key) numInitInterimProc++ }, @@ -1048,7 +1052,7 @@ func TestTransactionCoordinator_HandleProcessMiniBlockInit(t *testing.T) { numInitInterimProc = 0 shardCoord.CurrentShard = 0 - tc.handleProcessMiniBlockInit(mbHash) + tc.handleProcessMiniBlockInit(mbHash, headerHash) assert.Equal(t, 1, numResetGasHandler) assert.Equal(t, 1, numInitInterimProc) }) @@ -1057,7 +1061,7 @@ func TestTransactionCoordinator_HandleProcessMiniBlockInit(t *testing.T) { numInitInterimProc = 0 shardCoord.CurrentShard = core.MetachainShardId - tc.handleProcessMiniBlockInit(mbHash) + tc.handleProcessMiniBlockInit(mbHash, headerHash) assert.Equal(t, 1, numResetGasHandler) assert.Equal(t, 1, numInitInterimProc) }) @@ -1925,6 +1929,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot // all txs will be in datapool and none of them will return err when processed // so, tx processor will return nil on processing tx + headerHash := []byte("header hash") txHash1 := []byte("tx hash 1") txHash2 := []byte("tx hash 2") txHash3 := []byte("tx hash 3") @@ -2053,7 +2058,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot IndexOfLastTxProcessed: -1, FullyProcessed: false, } - err = tc.processCompleteMiniBlock(preproc, &miniBlock, []byte("hash"), haveTime, 
haveAdditionalTime, false, processedMbInfo) + err = tc.processCompleteMiniBlock(preproc, &miniBlock, []byte("hash"), haveTime, haveAdditionalTime, false, processedMbInfo, headerHash) assert.Nil(t, err) assert.Equal(t, tx1Nonce, tx1ExecutionResult) @@ -2085,6 +2090,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR TxHashes: [][]byte{txHash1, txHash2, txHash3}, } + headerHash := []byte("header hash") tx1Nonce := uint64(45) tx2Nonce := uint64(46) tx3Nonce := uint64(47) @@ -2198,7 +2204,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR IndexOfLastTxProcessed: -1, FullyProcessed: false, } - err = tc.processCompleteMiniBlock(preproc, &miniBlock, []byte("hash"), haveTime, haveAdditionalTime, false, processedMbInfo) + err = tc.processCompleteMiniBlock(preproc, &miniBlock, []byte("hash"), haveTime, haveAdditionalTime, false, processedMbInfo, headerHash) assert.Equal(t, process.ErrHigherNonceInTransaction, err) assert.True(t, revertAccntStateCalled) @@ -2210,14 +2216,14 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi tdp := initDataPool(txHash) shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ - ShardCoordinator: shardCoordinator, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - PubkeyConverter: createMockPubkeyConverter(), - Store: &storageStubs.ChainStorerStub{}, - PoolsHolder: tdp, - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + ShardCoordinator: shardCoordinator, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: tdp, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } preFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -2278,7 +2284,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { return MaxGasLimitPerBlock }, }, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } interFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -2342,7 +2348,8 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { tx, _ := tdp.UnsignedTransactions().SearchFirstData(scrHash) txs := make([]data.TransactionHandler, 0) txs = append(txs, tx.(data.TransactionHandler)) - err = interProc.AddIntermediateTransactions(txs) + txHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &hashingMocks.HasherMock{}, tx) + err = interProc.AddIntermediateTransactions(txs, txHash) assert.Nil(t, err) body := &block.Body{MiniBlocks: []*block.MiniBlock{{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId() + 1, TxHashes: [][]byte{scrHash}}}} @@ -4183,7 +4190,7 @@ func TestTransactionCoordinator_AddIntermediateTransactions(t *testing.T) { }, } - err := tc.AddIntermediateTransactions(mapSCRs) + err := 
tc.AddIntermediateTransactions(mapSCRs, nil) assert.Equal(t, process.ErrNilIntermediateProcessor, err) }) @@ -4195,7 +4202,7 @@ func TestTransactionCoordinator_AddIntermediateTransactions(t *testing.T) { expectedErr := errors.New("expected err") tc.keysInterimProcs = append(tc.keysInterimProcs, block.SmartContractResultBlock) tc.interimProcessors[block.SmartContractResultBlock] = &mock.IntermediateTransactionHandlerMock{ - AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler, key []byte) error { return expectedErr }, } @@ -4208,7 +4215,7 @@ func TestTransactionCoordinator_AddIntermediateTransactions(t *testing.T) { }, } - err := tc.AddIntermediateTransactions(mapSCRs) + err := tc.AddIntermediateTransactions(mapSCRs, nil) assert.Equal(t, expectedErr, err) }) @@ -4225,7 +4232,7 @@ func TestTransactionCoordinator_AddIntermediateTransactions(t *testing.T) { tc.keysInterimProcs = append(tc.keysInterimProcs, block.SmartContractResultBlock) tc.interimProcessors[block.SmartContractResultBlock] = &mock.IntermediateTransactionHandlerMock{ - AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler, key []byte) error { assert.Equal(t, expectedTxs, txs) return nil }, @@ -4239,7 +4246,7 @@ func TestTransactionCoordinator_AddIntermediateTransactions(t *testing.T) { }, } - err := tc.AddIntermediateTransactions(mapSCRs) + err := tc.AddIntermediateTransactions(mapSCRs, nil) assert.Nil(t, err) }) } diff --git a/process/coordinator/transactionType.go b/process/coordinator/transactionType.go index f1d47aff44c..d754da2c34d 100644 --- a/process/coordinator/transactionType.go +++ b/process/coordinator/transactionType.go @@ -91,6 +91,10 @@ func (tth *txTypeHandler) ComputeTransactionType(tx data.TransactionHandler) (pr return process.InvalidTransaction, process.InvalidTransaction } + if tth.isRelayedTransactionV3(tx) { + return process.RelayedTxV3, process.RelayedTxV3 + } + if len(tx.GetData()) == 0 { return process.MoveBalance, process.MoveBalance } @@ -191,6 +195,10 @@ func (tth *txTypeHandler) isRelayedTransactionV2(functionName string) bool { return functionName == core.RelayedTransactionV2 } +func (tth *txTypeHandler) isRelayedTransactionV3(tx data.TransactionHandler) bool { + return len(tx.GetUserTransactions()) != 0 +} + func (tth *txTypeHandler) isDestAddressEmpty(tx data.TransactionHandler) bool { isEmptyAddress := bytes.Equal(tx.GetRcvAddr(), make([]byte, tth.pubkeyConv.Len())) return isEmptyAddress diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go index 918b6069212..9739075d847 100644 --- a/process/coordinator/transactionType_test.go +++ b/process/coordinator/transactionType_test.go @@ -466,6 +466,32 @@ func TestTxTypeHandler_ComputeTransactionTypeRelayedV2Func(t *testing.T) { assert.Equal(t, process.RelayedTxV2, txTypeCross) } +func TestTxTypeHandler_ComputeTransactionTypeRelayedV3(t *testing.T) { + t.Parallel() + + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("000") + tx.RcvAddr = []byte("001") + tx.Value = big.NewInt(45) + tx.InnerTransactions = []*transaction.Transaction{{Nonce: 1}} + + arg := createMockArguments() + arg.PubkeyConverter = &testscommon.PubkeyConverterStub{ + LenCalled: func() int { + return len(tx.RcvAddr) + }, + } + tth, err := NewTxTypeHandler(arg) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + txTypeIn, 
txTypeCross := tth.ComputeTransactionType(tx) + assert.Equal(t, process.RelayedTxV3, txTypeIn) + assert.Equal(t, process.RelayedTxV3, txTypeCross) +} + func TestTxTypeHandler_ComputeTransactionTypeForSCRCallBack(t *testing.T) { t.Parallel() diff --git a/process/disabled/failedTxLogsAccumulator.go b/process/disabled/failedTxLogsAccumulator.go new file mode 100644 index 00000000000..3bd3f01cd69 --- /dev/null +++ b/process/disabled/failedTxLogsAccumulator.go @@ -0,0 +1,33 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-core-go/data" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +type failedTxLogsAccumulator struct { +} + +// NewFailedTxLogsAccumulator returns a new instance of disabled failedTxLogsAccumulator +func NewFailedTxLogsAccumulator() *failedTxLogsAccumulator { + return &failedTxLogsAccumulator{} +} + +// GetLogs returns false as it is disabled +func (accumulator *failedTxLogsAccumulator) GetLogs(_ []byte) (data.TransactionHandler, []*vmcommon.LogEntry, bool) { + return nil, nil, false +} + +// SaveLogs returns nil as it is disabled +func (accumulator *failedTxLogsAccumulator) SaveLogs(_ []byte, _ data.TransactionHandler, _ []*vmcommon.LogEntry) error { + return nil +} + +// Remove does nothing as it is disabled +func (accumulator *failedTxLogsAccumulator) Remove(_ []byte) { +} + +// IsInterfaceNil returns true if there is no value under the interface +func (accumulator *failedTxLogsAccumulator) IsInterfaceNil() bool { + return accumulator == nil +} diff --git a/process/disabled/relayedTxV3Processor.go b/process/disabled/relayedTxV3Processor.go new file mode 100644 index 00000000000..ddabd2753c8 --- /dev/null +++ b/process/disabled/relayedTxV3Processor.go @@ -0,0 +1,23 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-core-go/data/transaction" +) + +type relayedTxV3Processor struct { +} + +// NewRelayedTxV3Processor returns a new instance of disabled relayedTxV3Processor +func NewRelayedTxV3Processor() *relayedTxV3Processor { + return &relayedTxV3Processor{} +} + +// CheckRelayedTx returns nil as it is disabled +func (proc *relayedTxV3Processor) CheckRelayedTx(_ *transaction.Transaction) error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (proc *relayedTxV3Processor) IsInterfaceNil() bool { + return proc == nil +} diff --git a/process/disabled/txTypeHandler.go b/process/disabled/txTypeHandler.go new file mode 100644 index 00000000000..302e81af555 --- /dev/null +++ b/process/disabled/txTypeHandler.go @@ -0,0 +1,28 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/process" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("disabledTxTypeHandler") + +type txTypeHandler struct { +} + +// NewTxTypeHandler returns a new instance of disabled txTypeHandler +func NewTxTypeHandler() *txTypeHandler { + return &txTypeHandler{} +} + +// ComputeTransactionType always returns invalid transaction as it is disabled +func (handler *txTypeHandler) ComputeTransactionType(_ data.TransactionHandler) (process.TransactionType, process.TransactionType) { + log.Warn("disabled txTypeHandler ComputeTransactionType always returns invalid transaction") + return process.InvalidTransaction, process.InvalidTransaction +} + +// IsInterfaceNil returns true if there is no value under the interface +func (handler 
*txTypeHandler) IsInterfaceNil() bool { + return handler == nil +} diff --git a/process/economics/builtInFunctionsCost.go b/process/economics/builtInFunctionsCost.go deleted file mode 100644 index f784b5f2332..00000000000 --- a/process/economics/builtInFunctionsCost.go +++ /dev/null @@ -1,177 +0,0 @@ -package economics - -import ( - "github.com/mitchellh/mapstructure" - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/process" -) - -// ArgsBuiltInFunctionCost holds all components that are needed to create a new instance of builtInFunctionsCost -type ArgsBuiltInFunctionCost struct { - GasSchedule core.GasScheduleNotifier - ArgsParser process.ArgumentsParser -} - -type builtInFunctionsCost struct { - gasConfig *process.GasCost - specialBuiltInFunctions map[string]struct{} - argsParser process.ArgumentsParser -} - -// NewBuiltInFunctionsCost will create a new instance of builtInFunctionsCost -func NewBuiltInFunctionsCost(args *ArgsBuiltInFunctionCost) (*builtInFunctionsCost, error) { - if args == nil { - return nil, process.ErrNilArgsBuiltInFunctionsConstHandler - } - if check.IfNil(args.ArgsParser) { - return nil, process.ErrNilArgumentParser - } - if check.IfNil(args.GasSchedule) { - return nil, process.ErrNilGasSchedule - } - - bs := &builtInFunctionsCost{ - argsParser: args.ArgsParser, - } - - bs.initSpecialBuiltInFunctionCostMap() - - var err error - bs.gasConfig, err = createGasConfig(args.GasSchedule.LatestGasSchedule()) - if err != nil { - return nil, err - } - - args.GasSchedule.RegisterNotifyHandler(bs) - - return bs, nil -} - -func (bc *builtInFunctionsCost) initSpecialBuiltInFunctionCostMap() { - bc.specialBuiltInFunctions = map[string]struct{}{ - core.BuiltInFunctionClaimDeveloperRewards: {}, - core.BuiltInFunctionChangeOwnerAddress: {}, - core.BuiltInFunctionSetUserName: {}, - core.BuiltInFunctionSaveKeyValue: {}, - core.BuiltInFunctionESDTTransfer: {}, - core.BuiltInFunctionESDTBurn: {}, - core.BuiltInFunctionESDTLocalBurn: {}, - core.BuiltInFunctionESDTLocalMint: {}, - core.BuiltInFunctionESDTNFTAddQuantity: {}, - core.BuiltInFunctionESDTNFTBurn: {}, - core.BuiltInFunctionESDTNFTCreate: {}, - } -} - -// GasScheduleChange is called when gas schedule is changed, thus all contracts must be updated -func (bc *builtInFunctionsCost) GasScheduleChange(gasSchedule map[string]map[string]uint64) { - newGasConfig, err := createGasConfig(gasSchedule) - if err != nil { - return - } - - bc.gasConfig = newGasConfig -} - -// ComputeBuiltInCost will compute built-in function cost -func (bc *builtInFunctionsCost) ComputeBuiltInCost(tx data.TransactionWithFeeHandler) uint64 { - function, arguments, err := bc.argsParser.ParseCallData(string(tx.GetData())) - if err != nil { - return 0 - } - - switch function { - case core.BuiltInFunctionClaimDeveloperRewards: - return bc.gasConfig.BuiltInCost.ClaimDeveloperRewards - case core.BuiltInFunctionChangeOwnerAddress: - return bc.gasConfig.BuiltInCost.ChangeOwnerAddress - case core.BuiltInFunctionSetUserName: - return bc.gasConfig.BuiltInCost.SaveUserName - case core.BuiltInFunctionSaveKeyValue: - return bc.gasConfig.BuiltInCost.SaveKeyValue - case core.BuiltInFunctionESDTTransfer: - return bc.gasConfig.BuiltInCost.ESDTTransfer - case core.BuiltInFunctionESDTBurn: - return 
bc.gasConfig.BuiltInCost.ESDTBurn - case core.BuiltInFunctionESDTLocalBurn: - return bc.gasConfig.BuiltInCost.ESDTLocalBurn - case core.BuiltInFunctionESDTLocalMint: - return bc.gasConfig.BuiltInCost.ESDTLocalMint - case core.BuiltInFunctionESDTNFTAddQuantity: - return bc.gasConfig.BuiltInCost.ESDTNFTAddQuantity - case core.BuiltInFunctionESDTNFTBurn: - return bc.gasConfig.BuiltInCost.ESDTNFTBurn - case core.BuiltInFunctionESDTNFTCreate: - costStorage := calculateLenOfArguments(arguments) * bc.gasConfig.BaseOperationCost.StorePerByte - return bc.gasConfig.BuiltInCost.ESDTNFTCreate + costStorage - case core.BuiltInFunctionSetGuardian: - return bc.gasConfig.BuiltInCost.SetGuardian - case core.BuiltInFunctionGuardAccount: - return bc.gasConfig.BuiltInCost.GuardAccount - case core.BuiltInFunctionUnGuardAccount: - return bc.gasConfig.BuiltInCost.UnGuardAccount - default: - return 0 - } -} - -func calculateLenOfArguments(arguments [][]byte) uint64 { - totalLen := uint64(0) - for _, arg := range arguments { - totalLen += uint64(len(arg)) - } - - return totalLen -} - -// IsBuiltInFuncCall will check is the provided transaction is a build in function call -func (bc *builtInFunctionsCost) IsBuiltInFuncCall(tx data.TransactionWithFeeHandler) bool { - function, arguments, err := bc.argsParser.ParseCallData(string(tx.GetData())) - if err != nil { - return false - } - - _, isSpecialBuiltIn := bc.specialBuiltInFunctions[function] - isSCCallAfter := core.IsSmartContractAddress(tx.GetRcvAddr()) && len(arguments) > core.MinLenArgumentsESDTTransfer - - return isSpecialBuiltIn && !isSCCallAfter -} - -// IsInterfaceNil returns true if underlying object is nil -func (bc *builtInFunctionsCost) IsInterfaceNil() bool { - return bc == nil -} - -func createGasConfig(gasMap map[string]map[string]uint64) (*process.GasCost, error) { - baseOps := &process.BaseOperationCost{} - err := mapstructure.Decode(gasMap[common.BaseOperationCost], baseOps) - if err != nil { - return nil, err - } - - err = check.ForZeroUintFields(*baseOps) - if err != nil { - return nil, err - } - - builtInOps := &process.BuiltInCost{} - err = mapstructure.Decode(gasMap[common.BuiltInCost], builtInOps) - if err != nil { - return nil, err - } - - err = check.ForZeroUintFields(*builtInOps) - if err != nil { - return nil, err - } - - gasCost := process.GasCost{ - BaseOperationCost: *baseOps, - BuiltInCost: *builtInOps, - } - - return &gasCost, nil -} diff --git a/process/economics/builtInFunctionsCost_test.go b/process/economics/builtInFunctionsCost_test.go deleted file mode 100644 index befcca25912..00000000000 --- a/process/economics/builtInFunctionsCost_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package economics_test - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/process/economics" - "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" - "github.com/stretchr/testify/require" -) - -func TestNewBuiltInFunctionsCost(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - args func() *economics.ArgsBuiltInFunctionCost - exErr error - }{ - { - name: "NilArguments", - args: func() *economics.ArgsBuiltInFunctionCost { - return nil - }, - exErr: process.ErrNilArgsBuiltInFunctionsConstHandler, - }, 
- { - name: "NilArgumentsParser", - args: func() *economics.ArgsBuiltInFunctionCost { - return &economics.ArgsBuiltInFunctionCost{ - ArgsParser: nil, - GasSchedule: testscommon.NewGasScheduleNotifierMock(nil), - } - }, - exErr: process.ErrNilArgumentParser, - }, - { - name: "NilGasScheduleHandler", - args: func() *economics.ArgsBuiltInFunctionCost { - return &economics.ArgsBuiltInFunctionCost{ - ArgsParser: &mock.ArgumentParserMock{}, - GasSchedule: nil, - } - }, - exErr: process.ErrNilGasSchedule, - }, - { - name: "ShouldWork", - args: func() *economics.ArgsBuiltInFunctionCost { - return &economics.ArgsBuiltInFunctionCost{ - ArgsParser: &mock.ArgumentParserMock{}, - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - } - }, - exErr: nil, - }, - } - - for _, test := range tests { - _, err := economics.NewBuiltInFunctionsCost(test.args()) - require.Equal(t, test.exErr, err) - } -} - -func TestNewBuiltInFunctionsCost_GasConfig(t *testing.T) { - t.Parallel() - - args := &economics.ArgsBuiltInFunctionCost{ - ArgsParser: &mock.ArgumentParserMock{}, - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 0)), - } - - builtInCostHandler, err := economics.NewBuiltInFunctionsCost(args) - require.NotNil(t, err) - require.Nil(t, builtInCostHandler) - require.True(t, check.IfNil(builtInCostHandler)) -} diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go index 60658b19bf2..a510447dab2 100644 --- a/process/economics/economicsData.go +++ b/process/economics/economicsData.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/disabled" "github.com/multiversx/mx-chain-go/statusHandler" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -27,31 +28,28 @@ var log = logger.GetOrCreate("process/economics") type economicsData struct { *gasConfigHandler *rewardsConfigHandler - gasPriceModifier float64 - minInflation float64 - yearSettings map[uint32]*config.YearSetting - mutYearSettings sync.RWMutex - statusHandler core.AppStatusHandler - builtInFunctionsCostHandler BuiltInFunctionsCostHandler - enableEpochsHandler common.EnableEpochsHandler - txVersionHandler process.TxVersionCheckerHandler - mut sync.RWMutex + gasPriceModifier float64 + minInflation float64 + yearSettings map[uint32]*config.YearSetting + mutYearSettings sync.RWMutex + statusHandler core.AppStatusHandler + enableEpochsHandler common.EnableEpochsHandler + txVersionHandler process.TxVersionCheckerHandler + txTypeHandler process.TxTypeHandler + mutTxTypeHandler sync.RWMutex + mut sync.RWMutex } // ArgsNewEconomicsData defines the arguments needed for new economics economicsData type ArgsNewEconomicsData struct { - TxVersionChecker process.TxVersionCheckerHandler - BuiltInFunctionsCostHandler BuiltInFunctionsCostHandler - Economics *config.EconomicsConfig - EpochNotifier process.EpochNotifier - EnableEpochsHandler common.EnableEpochsHandler + TxVersionChecker process.TxVersionCheckerHandler + Economics *config.EconomicsConfig + EpochNotifier process.EpochNotifier + EnableEpochsHandler common.EnableEpochsHandler } // NewEconomicsData will create an object with information about economics parameters func NewEconomicsData(args 
ArgsNewEconomicsData) (*economicsData, error) { - if check.IfNil(args.BuiltInFunctionsCostHandler) { - return nil, process.ErrNilBuiltInFunctionsCostHandler - } if check.IfNil(args.TxVersionChecker) { return nil, process.ErrNilTransactionVersionChecker } @@ -75,12 +73,12 @@ func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { } ed := &economicsData{ - minInflation: args.Economics.GlobalSettings.MinimumInflation, - gasPriceModifier: args.Economics.FeeSettings.GasPriceModifier, - statusHandler: statusHandler.NewNilStatusHandler(), - builtInFunctionsCostHandler: args.BuiltInFunctionsCostHandler, - enableEpochsHandler: args.EnableEpochsHandler, - txVersionHandler: args.TxVersionChecker, + minInflation: args.Economics.GlobalSettings.MinimumInflation, + gasPriceModifier: args.Economics.FeeSettings.GasPriceModifier, + statusHandler: statusHandler.NewNilStatusHandler(), + enableEpochsHandler: args.EnableEpochsHandler, + txVersionHandler: args.TxVersionChecker, + txTypeHandler: disabled.NewTxTypeHandler(), } ed.yearSettings = make(map[uint32]*config.YearSetting) @@ -143,6 +141,19 @@ func (ed *economicsData) SetStatusHandler(statusHandler core.AppStatusHandler) e return ed.rewardsConfigHandler.setStatusHandler(statusHandler) } +// SetTxTypeHandler sets the provided tx type handler +func (ed *economicsData) SetTxTypeHandler(txTypeHandler process.TxTypeHandler) error { + if check.IfNil(txTypeHandler) { + return process.ErrNilTxTypeHandler + } + + ed.mutTxTypeHandler.Lock() + ed.txTypeHandler = txTypeHandler + ed.mutTxTypeHandler.Unlock() + + return nil +} + // LeaderPercentage returns leader reward percentage func (ed *economicsData) LeaderPercentage() float64 { currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() @@ -291,6 +302,11 @@ func (ed *economicsData) ComputeTxFee(tx data.TransactionWithFeeHandler) *big.In // ComputeTxFeeInEpoch computes the provided transaction's fee in a specific epoch func (ed *economicsData) ComputeTxFeeInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) *big.Int { + if len(tx.GetUserTransactions()) > 0 { + _, totalFee, _ := ed.ComputeRelayedTxFees(tx) + return totalFee + } + if ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.GasPriceModifierFlag, epoch) { if isSmartContractResult(tx) { return ed.ComputeFeeForProcessingInEpoch(tx, tx.GetGasLimit(), epoch) @@ -314,6 +330,56 @@ func (ed *economicsData) ComputeTxFeeInEpoch(tx data.TransactionWithFeeHandler, return ed.ComputeMoveBalanceFeeInEpoch(tx, epoch) } +// ComputeRelayedTxFees returns the both the total fee for the entire relayed tx and the relayed only fee +func (ed *economicsData) ComputeRelayedTxFees(tx data.TransactionWithFeeHandler) (*big.Int, *big.Int, error) { + innerTxs := tx.GetUserTransactions() + if len(innerTxs) == 0 { + return big.NewInt(0), big.NewInt(0), process.ErrEmptyInnerTransactions + } + + feesForInnerTxs := ed.getTotalFeesRequiredForInnerTxs(innerTxs) + + relayerUnguardedMoveBalanceFee := core.SafeMul(ed.GasPriceForMove(tx), ed.MinGasLimit()) + relayerTotalMoveBalanceFee := ed.ComputeMoveBalanceFee(tx) + relayerMoveBalanceFeeDiff := big.NewInt(0).Sub(relayerTotalMoveBalanceFee, relayerUnguardedMoveBalanceFee) + + relayerFee := big.NewInt(0).Mul(relayerUnguardedMoveBalanceFee, big.NewInt(int64(len(innerTxs)))) + relayerFee.Add(relayerFee, relayerMoveBalanceFeeDiff) // add the difference in case of guarded relayed tx + + totalFee := big.NewInt(0).Add(relayerFee, feesForInnerTxs) + + return relayerFee, totalFee, nil +} + +func (ed *economicsData) 
getTotalFeesRequiredForInnerTxs(innerTxs []data.TransactionHandler) *big.Int { + totalFees := big.NewInt(0) + for _, innerTx := range innerTxs { + if ed.isMoveBalance(innerTx) { + innerTxFee := ed.ComputeMoveBalanceFee(innerTx) + totalFees.Add(totalFees, innerTxFee) + + continue + } + + gasToUse := innerTx.GetGasLimit() - ed.ComputeGasLimit(innerTx) + moveBalanceUserFee := ed.ComputeMoveBalanceFee(innerTx) + processingUserFee := ed.ComputeFeeForProcessing(innerTx, gasToUse) + innerTxFee := big.NewInt(0).Add(moveBalanceUserFee, processingUserFee) + + totalFees.Add(totalFees, innerTxFee) + } + + return totalFees +} + +func (ed *economicsData) isMoveBalance(tx data.TransactionHandler) bool { + ed.mutTxTypeHandler.RLock() + _, dstTxType := ed.txTypeHandler.ComputeTransactionType(tx) + ed.mutTxTypeHandler.RUnlock() + + return dstTxType == process.MoveBalance +} + // SplitTxGasInCategories returns the gas split per categories func (ed *economicsData) SplitTxGasInCategories(tx data.TransactionWithFeeHandler) (gasLimitMove, gasLimitProcess uint64) { currentEpoch := ed.enableEpochsHandler.GetCurrentEpoch() @@ -517,28 +583,18 @@ func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValue(tx data.Transact // ComputeGasUsedAndFeeBasedOnRefundValueInEpoch will compute gas used value and transaction fee using refund value from a SCR in a specific epoch func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) { if refundValue.Cmp(big.NewInt(0)) == 0 { - if ed.builtInFunctionsCostHandler.IsBuiltInFuncCall(tx) { - builtInCost := ed.builtInFunctionsCostHandler.ComputeBuiltInCost(tx) - computedGasLimit := ed.ComputeGasLimitInEpoch(tx, epoch) - - gasLimitWithBuiltInCost := builtInCost + computedGasLimit - txFee := ed.ComputeTxFeeBasedOnGasUsedInEpoch(tx, gasLimitWithBuiltInCost, epoch) - - gasLimitWithoutMoveBalance := tx.GetGasLimit() - computedGasLimit - // transaction will consume all the gas if sender provided too much gas - if isTooMuchGasProvided(gasLimitWithoutMoveBalance, gasLimitWithoutMoveBalance-builtInCost) { - return tx.GetGasLimit(), ed.ComputeTxFeeInEpoch(tx, epoch) - } - - return gasLimitWithBuiltInCost, txFee - } - txFee := ed.ComputeTxFeeInEpoch(tx, epoch) + return tx.GetGasLimit(), txFee } txFee := ed.ComputeTxFeeInEpoch(tx, epoch) + if len(tx.GetUserTransactions()) > 0 { + gasUnitsUsed := big.NewInt(0).Div(txFee, big.NewInt(0).SetUint64(tx.GetGasPrice())) + return gasUnitsUsed.Uint64(), txFee + } + isPenalizedTooMuchGasFlagEnabled := ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.PenalizedTooMuchGasFlag, epoch) isGasPriceModifierFlagEnabled := ed.enableEpochsHandler.IsFlagEnabledInEpoch(common.GasPriceModifierFlag, epoch) flagCorrectTxFee := !isPenalizedTooMuchGasFlagEnabled && !isGasPriceModifierFlagEnabled @@ -560,15 +616,6 @@ func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.T return gasUsed, txFee } -func isTooMuchGasProvided(gasProvided uint64, gasRemained uint64) bool { - if gasProvided <= gasRemained { - return false - } - - gasUsed := gasProvided - gasRemained - return gasProvided > gasUsed*process.MaxGasFeeHigherFactorAccepted -} - // ComputeTxFeeBasedOnGasUsed will compute transaction fee func (ed *economicsData) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int { currenEpoch := ed.enableEpochsHandler.GetCurrentEpoch() diff --git a/process/economics/economicsData_test.go 
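Illustrative sketch (not part of the patch): for relayed v3 transactions, the refund-value path above derives the gas units back from the already-computed fee, i.e. gasUsed = totalFee / gasPrice, mirroring the big.Int division added in ComputeGasUsedAndFeeBasedOnRefundValueInEpoch. A minimal sketch with made-up numbers, assuming math/big is imported:

	totalFee := big.NewInt(0).Mul(big.NewInt(200_000), big.NewInt(1_000_000_000)) // hypothetical fee for 200_000 gas units
	gasPrice := big.NewInt(0).SetUint64(1_000_000_000)
	gasUnitsUsed := big.NewInt(0).Div(totalFee, gasPrice) // 200_000, recovering the gas units from the fee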
b/process/economics/economicsData_test.go index 417ef1b7826..5fdb8c369c2 100644 --- a/process/economics/economicsData_test.go +++ b/process/economics/economicsData_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" @@ -16,13 +17,10 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/economics" - "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -106,13 +104,12 @@ func createArgsForEconomicsData(gasModifier float64) economics.ArgsNewEconomicsD return flag == common.GasPriceModifierFlag }, }, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } return args } -func createArgsForEconomicsDataRealFees(handler economics.BuiltInFunctionsCostHandler) economics.ArgsNewEconomicsData { +func createArgsForEconomicsDataRealFees() economics.ArgsNewEconomicsData { feeSettings := feeSettingsReal() args := economics.ArgsNewEconomicsData{ Economics: createDummyEconomicsConfig(feeSettings), @@ -122,8 +119,7 @@ func createArgsForEconomicsDataRealFees(handler economics.BuiltInFunctionsCostHa return flag == common.GasPriceModifierFlag }, }, - BuiltInFunctionsCostHandler: handler, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } return args } @@ -525,16 +521,6 @@ func TestNewEconomicsData_InvalidTopUpGradientPointShouldErr(t *testing.T) { assert.True(t, errors.Is(err, process.ErrInvalidRewardsTopUpGradientPoint)) } -func TestNewEconomicsData_NilBuiltInFunctionsCostHandlerShouldErr(t *testing.T) { - t.Parallel() - - args := createArgsForEconomicsData(1) - args.BuiltInFunctionsCostHandler = nil - - _, err := economics.NewEconomicsData(args) - assert.Equal(t, process.ErrNilBuiltInFunctionsCostHandler, err) -} - func TestNewEconomicsData_NilTxVersionCheckerShouldErr(t *testing.T) { t.Parallel() @@ -1141,7 +1127,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueZero(t *testing.T) func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheckGasUsedValue(t *testing.T) { t.Parallel() - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{})) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) txData := 
[]byte("0061736d0100000001150460037f7f7e017f60027f7f017e60017e0060000002420303656e7611696e74363473746f7261676553746f7265000003656e7610696e74363473746f726167654c6f6164000103656e760b696e74363466696e6973680002030504030303030405017001010105030100020608017f01419088040b072f05066d656d6f7279020004696e6974000309696e6372656d656e7400040964656372656d656e7400050367657400060a8a01041300418088808000410742011080808080001a0b2e01017e4180888080004107418088808000410710818080800042017c22001080808080001a20001082808080000b2e01017e41808880800041074180888080004107108180808000427f7c22001080808080001a20001082808080000b160041808880800041071081808080001082808080000b0b0f01004180080b08434f554e54455200@0500@0100") tx1 := &transaction.Transaction{ GasPrice: 1000000000, @@ -1194,7 +1180,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheckGasUsedValue(t func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheck(t *testing.T) { t.Parallel() - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{})) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) txData := []byte("0061736d0100000001150460037f7f7e017f60027f7f017e60017e0060000002420303656e7611696e74363473746f7261676553746f7265000003656e7610696e74363473746f726167654c6f6164000103656e760b696e74363466696e6973680002030504030303030405017001010105030100020608017f01419088040b072f05066d656d6f7279020004696e6974000309696e6372656d656e7400040964656372656d656e7400050367657400060a8a01041300418088808000410742011080808080001a0b2e01017e4180888080004107418088808000410710818080800042017c22001080808080001a20001082808080000b2e01017e41808880800041074180888080004107108180808000427f7c22001080808080001a20001082808080000b160041808880800041071081808080001082808080000b0b0f01004180080b08434f554e54455200@0500@0100") tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1214,11 +1200,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheck(t *testing.T) func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn_ToMuchGasProvided(t *testing.T) { t.Parallel() - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(builtInCostHandler)) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1236,11 +1218,6 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn_ToMu } func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueStakeTx(t *testing.T) { - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - txStake := &transaction.Transaction{ GasPrice: 1000000000, GasLimit: 250000000, @@ -1250,7 +1227,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueStakeTx(t *testing. 
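// Side note (illustrative, not part of the patch): with the gas price of 1_000_000_000 used in
// txStake above, the expectations below are internally consistent, since fee = gasUsed * gasPrice:
// 39_378_847 * 1_000_000_000 = 39_378_847_000_000_000.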
expectedGasUsed := uint64(39378847) expectedFee, _ := big.NewInt(0).SetString("39378847000000000", 10) - args := createArgsForEconomicsDataRealFees(builtInCostHandler) + args := createArgsForEconomicsDataRealFees() args.EpochNotifier = forking.NewGenericEpochNotifier() args.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: 1000, @@ -1267,11 +1244,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueStakeTx(t *testing. func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn(t *testing.T) { t.Parallel() - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(builtInCostHandler)) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1279,8 +1252,8 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn(t *t Data: []byte("ESDTTransfer@54474e2d383862383366@0a"), } - expectedGasUsed := uint64(104001) - expectedFee, _ := big.NewInt(0).SetString("104000010000000", 10) + expectedGasUsed := uint64(104009) + expectedFee, _ := big.NewInt(0).SetString("104000090000000", 10) refundValue, _ := big.NewInt(0).SetString("0", 10) gasUsed, fee := economicData.ComputeGasUsedAndFeeBasedOnRefundValue(tx, refundValue) @@ -1291,11 +1264,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn(t *t func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltInTooMuchGas(t *testing.T) { t.Parallel() - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(builtInCostHandler)) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1315,7 +1284,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltInTooMu func TestEconomicsData_ComputeGasLimitBasedOnBalance(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() args.EpochNotifier = forking.NewGenericEpochNotifier() args.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(config.EnableEpochs{ GasPriceModifierEnableEpoch: 1, @@ -1353,7 +1322,7 @@ func TestEconomicsData_ComputeGasLimitBasedOnBalance(t *testing.T) { func TestEconomicsData_MaxGasPriceSetGuardian(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() maxGasPriceSetGuardianString := "2000000" expectedMaxGasPriceSetGuardian, err := strconv.ParseUint(maxGasPriceSetGuardianString, 10, 64) require.Nil(t, err) @@ -1369,7 +1338,7 @@ func TestEconomicsData_SetStatusHandler(t *testing.T) { t.Run("nil status handler should error", func(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() 
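// --- Illustrative sketch, not part of the patch ---
// For an unguarded, data-less relayed v3 transaction whose inner transactions are all plain
// move-balance transfers, ComputeRelayedTxFees is expected to reduce to the arithmetic below.
// The closure name and the constants are hypothetical, added only to make the fee split explicit;
// see TestEconomicsData_ComputeRelayedTxFees further below for the real expectations.
sketchRelayedV3Fees := func(numInnerTxs, minGasLimit, gasPrice, innerFeesSum uint64) (relayerFee, totalFee uint64) {
	// the relayer pays one unguarded move-balance fee per inner transaction
	relayerFee = numInnerTxs * minGasLimit * gasPrice
	// the total fee adds the fees required by the inner transactions themselves
	totalFee = relayerFee + innerFeesSum
	return relayerFee, totalFee
}
relayerFee, totalFee := sketchRelayedV3Fees(2, 50_000, 1, 100_000)
// relayerFee == 100_000 and totalFee == 200_000 with these made-up settings
_, _ = relayerFee, totalFee
// --- end sketch ---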
economicData, _ := economics.NewEconomicsData(args) err := economicData.SetStatusHandler(nil) @@ -1378,7 +1347,7 @@ func TestEconomicsData_SetStatusHandler(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() economicData, _ := economics.NewEconomicsData(args) err := economicData.SetStatusHandler(&statusHandler.AppStatusHandlerStub{}) @@ -1653,3 +1622,102 @@ func TestEconomicsData_RewardsTopUpFactor(t *testing.T) { value := economicsData.RewardsTopUpFactor() assert.Equal(t, topUpFactor, value) } + +func TestEconomicsData_ComputeRelayedTxFees(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + minGasLimit, _ := strconv.Atoi(args.Economics.FeeSettings.GasLimitSettings[0].MinGasLimit) + tx := &transaction.Transaction{ + Nonce: 0, + Value: big.NewInt(0), + RcvAddr: []byte("rel"), + SndAddr: []byte("rel"), + GasPrice: 1, + GasLimit: uint64(minGasLimit) * 4, + InnerTransactions: []*transaction.Transaction{ + { + Nonce: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcv1"), + SndAddr: []byte("snd1"), + GasPrice: 1, + GasLimit: uint64(minGasLimit), + RelayerAddr: []byte("rel"), + }, + { + Nonce: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcv1"), + SndAddr: []byte("snd2"), + GasPrice: 1, + GasLimit: uint64(minGasLimit), + RelayerAddr: []byte("rel"), + }, + }, + } + t.Run("empty inner txs should error", func(t *testing.T) { + t.Parallel() + + economicsData, _ := economics.NewEconomicsData(args) + + txCopy := *tx + txCopy.InnerTransactions = []*transaction.Transaction{} + relayerFee, totalFee, err := economicsData.ComputeRelayedTxFees(&txCopy) + require.Equal(t, process.ErrEmptyInnerTransactions, err) + require.Equal(t, big.NewInt(0), relayerFee) + require.Equal(t, big.NewInt(0), totalFee) + }) + t.Run("should work unguarded", func(t *testing.T) { + t.Parallel() + + economicsData, _ := economics.NewEconomicsData(args) + + _ = economicsData.SetTxTypeHandler(&testscommon.TxTypeHandlerMock{ + ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { + return process.MoveBalance, process.MoveBalance + }, + }) + + relayerFee, totalFee, err := economicsData.ComputeRelayedTxFees(tx) + require.NoError(t, err) + expectedRelayerFee := big.NewInt(int64(2 * uint64(minGasLimit) * tx.GetGasPrice())) // 2 move balance + require.Equal(t, expectedRelayerFee, relayerFee) + require.Equal(t, big.NewInt(int64(tx.GetGasLimit()*tx.GetGasPrice())), totalFee) + }) + t.Run("should work guarded", func(t *testing.T) { + t.Parallel() + + argsLocal := createArgsForEconomicsData(1) + argsLocal.TxVersionChecker = &testscommon.TxVersionCheckerStub{ + IsGuardedTransactionCalled: func(tx *transaction.Transaction) bool { + return len(tx.InnerTransactions) > 0 // only the relayed tx is guarded + }, + } + economicsData, _ := economics.NewEconomicsData(argsLocal) + + extraGasLimitGuardedTx, _ := strconv.Atoi(argsLocal.Economics.FeeSettings.GasLimitSettings[0].ExtraGasLimitGuardedTx) + + txCopy := *tx + txCopy.GasLimit += uint64(extraGasLimitGuardedTx) + relayerFee, totalFee, err := economicsData.ComputeRelayedTxFees(&txCopy) + require.NoError(t, err) + expectedRelayerFee := big.NewInt(int64(2*uint64(minGasLimit)*txCopy.GetGasPrice() + uint64(extraGasLimitGuardedTx)*txCopy.GetGasPrice())) // 2 move balance + require.Equal(t, expectedRelayerFee, relayerFee) + require.Equal(t, 
big.NewInt(int64(txCopy.GetGasLimit()*txCopy.GetGasPrice())), totalFee) + }) +} + +func TestEconomicsData_SetTxTypeHandler(t *testing.T) { + t.Parallel() + + args := createArgsForEconomicsData(1) + economicsData, _ := economics.NewEconomicsData(args) + assert.NotNil(t, economicsData) + + err := economicsData.SetTxTypeHandler(nil) + require.Equal(t, process.ErrNilTxTypeHandler, err) + + err = economicsData.SetTxTypeHandler(&testscommon.TxTypeHandlerMock{}) + require.NoError(t, err) +} diff --git a/process/economics/interface.go b/process/economics/interface.go index 766ba7563e3..41332c30eef 100644 --- a/process/economics/interface.go +++ b/process/economics/interface.go @@ -1,17 +1,9 @@ package economics import ( - "github.com/multiversx/mx-chain-core-go/data" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) -// BuiltInFunctionsCostHandler is able to calculate the cost of a built-in function call -type BuiltInFunctionsCostHandler interface { - ComputeBuiltInCost(tx data.TransactionWithFeeHandler) uint64 - IsBuiltInFuncCall(tx data.TransactionWithFeeHandler) bool - IsInterfaceNil() bool -} - // EpochNotifier raises epoch change events type EpochNotifier interface { RegisterNotifyHandler(handler vmcommon.EpochSubscriberHandler) diff --git a/process/errors.go b/process/errors.go index 6ae40412109..a4e426691bd 100644 --- a/process/errors.go +++ b/process/errors.go @@ -194,6 +194,9 @@ var ErrNilShardCoordinator = errors.New("nil shard coordinator") // ErrNilNodesCoordinator signals that an operation has been attempted to or with a nil nodes coordinator var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") +// ErrNilStakingDataProvider signals that a nil staking data provider was used +var ErrNilStakingDataProvider = errors.New("nil staking data provider") + // ErrNilKeyGen signals that an operation has been attempted to or with a nil single sign key generator var ErrNilKeyGen = errors.New("nil key generator") @@ -981,12 +984,6 @@ var ErrMaxAccumulatedFeesExceeded = errors.New("max accumulated fees has been ex // ErrMaxDeveloperFeesExceeded signals that max developer fees has been exceeded var ErrMaxDeveloperFeesExceeded = errors.New("max developer fees has been exceeded") -// ErrNilBuiltInFunctionsCostHandler signals that a nil built-in functions cost handler has been provided -var ErrNilBuiltInFunctionsCostHandler = errors.New("nil built in functions cost handler") - -// ErrNilArgsBuiltInFunctionsConstHandler signals that a nil arguments struct for built-in functions cost handler has been provided -var ErrNilArgsBuiltInFunctionsConstHandler = errors.New("nil arguments for built in functions cost handler") - // ErrInvalidEpochStartMetaBlockConsensusPercentage signals that a small epoch start meta block consensus percentage has been provided var ErrInvalidEpochStartMetaBlockConsensusPercentage = errors.New("invalid epoch start meta block consensus percentage") @@ -1226,3 +1223,54 @@ var ErrNilStorageService = errors.New("nil storage service") // ErrInvalidAsyncArguments signals that invalid arguments were given for async/callBack processing var ErrInvalidAsyncArguments = errors.New("invalid arguments to process async/callback function") + +// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") + +// ErrTransferAndExecuteByUserAddressesAreNil signals that transfer and execute by user addresses are nil +var 
ErrTransferAndExecuteByUserAddressesAreNil = errors.New("transfer and execute by user addresses are nil") + +// ErrRelayedV3GasPriceMismatch signals that relayed v3 gas price is not equal with inner tx +var ErrRelayedV3GasPriceMismatch = errors.New("relayed tx v3 gas price mismatch") + +// ErrRelayedTxV3SenderDoesNotMatchReceiver signals that the sender of relayed tx v3 does not match the receiver +var ErrRelayedTxV3SenderDoesNotMatchReceiver = errors.New("relayed tx v3 sender does not match receiver") + +// ErrRelayedTxV3Disabled signals that the v3 version of relayed tx is disabled +var ErrRelayedTxV3Disabled = errors.New("relayed tx v3 is disabled") + +// ErrRelayedTxV3ZeroVal signals that the v3 version of relayed tx should be created with 0 as value +var ErrRelayedTxV3ZeroVal = errors.New("relayed tx v3 value should be 0") + +// ErrRelayedTxV3RelayerMismatch signals that the relayer address of the inner tx does not match the real relayer +var ErrRelayedTxV3RelayerMismatch = errors.New("relayed tx v3 relayer mismatch") + +// ErrRelayedTxV3GasLimitMismatch signals that relayed tx v3 gas limit is higher than user tx gas limit +var ErrRelayedTxV3GasLimitMismatch = errors.New("relayed tx v3 gas limit mismatch") + +// ErrNilRelayedTxV3Processor signals that a nil relayed tx v3 processor has been provided +var ErrNilRelayedTxV3Processor = errors.New("nil relayed tx v3 processor") + +// ErrRelayedTxV3SenderShardMismatch signals that the sender from inner transaction is from a different shard than relayer +var ErrRelayedTxV3SenderShardMismatch = errors.New("sender shard mismatch") + +// ErrNilRelayerAccount signals that a nil relayer accouont has been provided +var ErrNilRelayerAccount = errors.New("nil relayer account") + +// ErrRelayedTxV3TooManyInnerTransactions signals that too many inner transactions were provided +var ErrRelayedTxV3TooManyInnerTransactions = errors.New("too many inner transactions") + +// ErrConsumedFeesMismatch signals that the fees consumed from relayer do not match the inner transactions fees +var ErrConsumedFeesMismatch = errors.New("consumed fees mismatch") + +// ErrRelayedTxV3InvalidDataField signals that the data field is invalid +var ErrRelayedTxV3InvalidDataField = errors.New("invalid data field") + +// ErrMultipleRelayedTxTypesIsNotAllowed signals that multiple types of relayed tx is not allowed +var ErrMultipleRelayedTxTypesIsNotAllowed = errors.New("multiple relayed tx types is not allowed") + +// ErrNilFailedTxLogsAccumulator signals that a nil failed transaction logs accumulator has been provided +var ErrNilFailedTxLogsAccumulator = errors.New("nil failed transaction logs accumulator") + +// ErrEmptyInnerTransactions signals that the inner transactions slice is empty +var ErrEmptyInnerTransactions = errors.New("empty inner transactions") diff --git a/process/factory/interceptorscontainer/args.go b/process/factory/interceptorscontainer/args.go index 294e66290b3..0d224b031ad 100644 --- a/process/factory/interceptorscontainer/args.go +++ b/process/factory/interceptorscontainer/args.go @@ -43,4 +43,5 @@ type CommonInterceptorsContainerFactoryArgs struct { FullArchivePeerShardMapper process.PeerShardMapper HardforkTrigger heartbeat.HardforkTrigger NodeOperationMode common.NodeOperation + RelayedTxV3Processor process.RelayedTxV3Processor } diff --git a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go index 38d3e460bce..31a4344b771 100644 --- 
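Illustrative note (not part of the patch): the sentinel errors added above are intended to be matched with errors.Is, which still succeeds when a caller wraps them with extra context; the same pattern appears later in this patch, where NewVMContainerFactory wraps process.ErrNilNodesCoordinator via fmt.Errorf("%w in NewVMContainerFactory", ...). A minimal sketch, assuming the errors and fmt packages are imported alongside process (the wrapping message here is made up):

	wrapped := fmt.Errorf("%w for the intercepted transaction", process.ErrRelayedTxV3Disabled)
	fmt.Println(errors.Is(wrapped, process.ErrRelayedTxV3Disabled)) // prints true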
a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go @@ -99,6 +99,7 @@ func NewMetaInterceptorsContainerFactory( SignaturesHandler: args.SignaturesHandler, HeartbeatExpiryTimespanInSec: args.HeartbeatExpiryTimespanInSec, PeerID: args.MainMessenger.ID(), + RelayedTxV3Processor: args.RelayedTxV3Processor, } base := &baseInterceptorsContainerFactory{ diff --git a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go index c8ed20b5fad..b9124001264 100644 --- a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go +++ b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go @@ -18,6 +18,7 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -697,7 +698,7 @@ func getArgumentsMeta( WhiteListHandler: &testscommon.WhiteListHandlerStub{}, WhiteListerVerifiedTxs: &testscommon.WhiteListHandlerStub{}, AntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - ArgumentsParser: &mock.ArgumentParserMock{}, + ArgumentsParser: &testscommon.ArgumentParserMock{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, RequestHandler: &testscommon.RequestHandlerStub{}, PeerSignatureHandler: &mock.PeerSignatureHandlerStub{}, @@ -707,5 +708,6 @@ func getArgumentsMeta( FullArchivePeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, HardforkTrigger: &testscommon.HardforkTriggerStub{}, NodeOperationMode: common.NormalOperation, + RelayedTxV3Processor: &processMocks.RelayedTxV3ProcessorMock{}, } } diff --git a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go index beef288c54c..26224fbc152 100644 --- a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go @@ -98,6 +98,7 @@ func NewShardInterceptorsContainerFactory( SignaturesHandler: args.SignaturesHandler, HeartbeatExpiryTimespanInSec: args.HeartbeatExpiryTimespanInSec, PeerID: args.MainMessenger.ID(), + RelayedTxV3Processor: args.RelayedTxV3Processor, } base := &baseInterceptorsContainerFactory{ diff --git a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go index 24472c24f32..f802562ae35 100644 --- a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go +++ b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go @@ -22,6 +22,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + 
"github.com/multiversx/mx-chain-go/testscommon/processMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -723,7 +724,7 @@ func getArgumentsShard( AntifloodHandler: &mock.P2PAntifloodHandlerStub{}, WhiteListHandler: &testscommon.WhiteListHandlerStub{}, WhiteListerVerifiedTxs: &testscommon.WhiteListHandlerStub{}, - ArgumentsParser: &mock.ArgumentParserMock{}, + ArgumentsParser: &testscommon.ArgumentParserMock{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, RequestHandler: &testscommon.RequestHandlerStub{}, PeerSignatureHandler: &mock.PeerSignatureHandlerStub{}, @@ -732,5 +733,6 @@ func getArgumentsShard( MainPeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, FullArchivePeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, HardforkTrigger: &testscommon.HardforkTriggerStub{}, + RelayedTxV3Processor: &processMocks.RelayedTxV3ProcessorMock{}, } } diff --git a/process/factory/metachain/intermediateProcessorsContainerFactory_test.go b/process/factory/metachain/intermediateProcessorsContainerFactory_test.go index 79861ced4bd..f58b8e41f72 100644 --- a/process/factory/metachain/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/metachain/intermediateProcessorsContainerFactory_test.go @@ -23,14 +23,14 @@ func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { func createMockArgsNewIntermediateProcessorsFactory() metachain.ArgsNewIntermediateProcessorsContainerFactory { args := metachain.ArgsNewIntermediateProcessorsContainerFactory{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), - PubkeyConverter: createMockPubkeyConverter(), - Store: &storageStubs.ChainStorerStub{}, - PoolsHolder: dataRetrieverMock.NewPoolsHolderMock(), - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: dataRetrieverMock.NewPoolsHolderMock(), + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } return args diff --git a/process/factory/metachain/vmContainerFactory.go b/process/factory/metachain/vmContainerFactory.go index c3dbb17e4e6..8f8fd90bbc9 100644 --- a/process/factory/metachain/vmContainerFactory.go +++ b/process/factory/metachain/vmContainerFactory.go @@ -44,10 +44,12 @@ type vmContainerFactory struct { scFactory vm.SystemSCContainerFactory shardCoordinator sharding.Coordinator enableEpochsHandler common.EnableEpochsHandler + nodesCoordinator vm.NodesCoordinator } // ArgsNewVMContainerFactory defines the arguments needed to create a new VM container factory type ArgsNewVMContainerFactory struct { + ArgBlockChainHook hooks.ArgBlockChainHook Economics process.EconomicsDataHandler MessageSignVerifier vm.MessageSignVerifier GasSchedule core.GasScheduleNotifier @@ -62,6 +64,7 @@ type ArgsNewVMContainerFactory struct { PubkeyConv 
core.PubkeyConverter BlockChainHook process.BlockChainHookWithAccountsAdapter EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinator vm.NodesCoordinator } // NewVMContainerFactory is responsible for creating a new virtual machine factory object @@ -108,6 +111,9 @@ func NewVMContainerFactory(args ArgsNewVMContainerFactory) (*vmContainerFactory, if check.IfNil(args.EnableEpochsHandler) { return nil, vm.ErrNilEnableEpochsHandler } + if check.IfNil(args.NodesCoordinator) { + return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilNodesCoordinator) + } cryptoHook := hooks.NewVMCryptoHook() @@ -127,6 +133,7 @@ func NewVMContainerFactory(args ArgsNewVMContainerFactory) (*vmContainerFactory, addressPubKeyConverter: args.PubkeyConv, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, + nodesCoordinator: args.NodesCoordinator, }, nil } @@ -200,6 +207,7 @@ func (vmf *vmContainerFactory) createSystemVMFactoryAndEEI() (vm.SystemSCContain AddressPubKeyConverter: vmf.addressPubKeyConverter, ShardCoordinator: vmf.shardCoordinator, EnableEpochsHandler: vmf.enableEpochsHandler, + NodesCoordinator: vmf.nodesCoordinator, } scFactory, err := systemVMFactory.NewSystemSCFactory(argsNewSystemScFactory) if err != nil { diff --git a/process/factory/metachain/vmContainerFactory_test.go b/process/factory/metachain/vmContainerFactory_test.go index ae466d23e8b..7e49beff1cc 100644 --- a/process/factory/metachain/vmContainerFactory_test.go +++ b/process/factory/metachain/vmContainerFactory_test.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/vm" wasmConfig "github.com/multiversx/mx-chain-vm-go/config" @@ -62,6 +63,14 @@ func createVmContainerMockArgument(gasSchedule core.GasScheduleNotifier) ArgsNew BleedPercentagePerRound: 1, MaxNumberOfNodesForStake: 1, ActivateBLSPubKeyMessageVerification: false, + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, + }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, }, }, ValidatorAccountsDB: &stateMock.AccountsStub{}, @@ -69,6 +78,9 @@ func createVmContainerMockArgument(gasSchedule core.GasScheduleNotifier) ArgsNew ChanceComputer: &mock.RaterMock{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakeFlag), + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }}, } } @@ -228,6 +240,18 @@ func TestNewVMContainerFactory_NilShardCoordinator(t *testing.T) { assert.True(t, errors.Is(err, vm.ErrNilShardCoordinator)) } +func TestNewVMContainerFactory_NilNodesCoordinatorFails(t *testing.T) { + t.Parallel() + + gasSchedule := makeGasSchedule() + argsNewVmContainerFactory := createVmContainerMockArgument(gasSchedule) + argsNewVmContainerFactory.NodesCoordinator = nil + vmf, err := NewVMContainerFactory(argsNewVmContainerFactory) + + assert.True(t, check.IfNil(vmf)) + assert.True(t, errors.Is(err, process.ErrNilNodesCoordinator)) +} + func 
TestNewVMContainerFactory_NilEnableEpochsHandler(t *testing.T) { t.Parallel() @@ -296,10 +320,9 @@ func TestVmContainerFactory_Create(t *testing.T) { MaxGasPriceSetGuardian: "100000", }, }, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -343,6 +366,8 @@ func TestVmContainerFactory_Create(t *testing.T) { MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -353,12 +378,21 @@ func TestVmContainerFactory_Create(t *testing.T) { MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: &stateMock.AccountsStub{}, UserAccountsDB: &stateMock.AccountsStub{}, ChanceComputer: &mock.RaterMock{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(1), EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }}, } vmf, err := NewVMContainerFactory(argsNewVMContainerFactory) assert.NotNil(t, vmf) diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index 2f2cc7a9c52..5835a7361ac 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -57,14 +57,14 @@ func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { func createMockArgsNewIntermediateProcessorsFactory() shard.ArgsNewIntermediateProcessorsContainerFactory { args := shard.ArgsNewIntermediateProcessorsContainerFactory{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), - PubkeyConverter: createMockPubkeyConverter(), - Store: &storageStubs.ChainStorerStub{}, - PoolsHolder: createDataPools(), - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: createDataPools(), + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } return args diff --git a/process/factory/shard/vmContainerFactory.go b/process/factory/shard/vmContainerFactory.go index 35c17f763a1..42e6ae3c98a 100644 --- a/process/factory/shard/vmContainerFactory.go +++ b/process/factory/shard/vmContainerFactory.go @@ 
-44,8 +44,13 @@ type vmContainerFactory struct { wasmVMChangeLocker common.Locker esdtTransferParser vmcommon.ESDTTransferParser hasher hashing.Hasher + pubKeyConverter core.PubkeyConverter + + mapOpcodeAddressIsAllowed map[string]map[string]struct{} } +const managedMultiTransferESDTNFTExecuteByUser = "managedMultiTransferESDTNFTExecuteByUser" + // ArgVMContainerFactory defines the arguments needed to the new VM factory type ArgVMContainerFactory struct { Config config.VirtualMachineConfig @@ -58,6 +63,7 @@ type ArgVMContainerFactory struct { BuiltInFunctions vmcommon.BuiltInFunctionContainer BlockChainHook process.BlockChainHookWithAccountsAdapter Hasher hashing.Hasher + PubKeyConverter core.PubkeyConverter } // NewVMContainerFactory is responsible for creating a new virtual machine factory object @@ -86,6 +92,9 @@ func NewVMContainerFactory(args ArgVMContainerFactory) (*vmContainerFactory, err if check.IfNil(args.Hasher) { return nil, process.ErrNilHasher } + if check.IfNil(args.PubKeyConverter) { + return nil, process.ErrNilPubkeyConverter + } cryptoHook := hooks.NewVMCryptoHook() @@ -102,6 +111,7 @@ func NewVMContainerFactory(args ArgVMContainerFactory) (*vmContainerFactory, err wasmVMChangeLocker: args.WasmVMChangeLocker, esdtTransferParser: args.ESDTTransferParser, hasher: args.Hasher, + pubKeyConverter: args.PubKeyConverter, } vmf.wasmVMVersions = args.Config.WasmVMVersions @@ -111,9 +121,34 @@ func NewVMContainerFactory(args ArgVMContainerFactory) (*vmContainerFactory, err return nil, err } + err = vmf.createMapOpCodeAddressIsAllowed() + if err != nil { + return nil, err + } + return vmf, nil } +func (vmf *vmContainerFactory) createMapOpCodeAddressIsAllowed() error { + vmf.mapOpcodeAddressIsAllowed = make(map[string]map[string]struct{}) + + transferAndExecuteByUserAddresses := vmf.config.TransferAndExecuteByUserAddresses + if len(transferAndExecuteByUserAddresses) == 0 { + return process.ErrTransferAndExecuteByUserAddressesAreNil + } + + vmf.mapOpcodeAddressIsAllowed[managedMultiTransferESDTNFTExecuteByUser] = make(map[string]struct{}) + for _, address := range transferAndExecuteByUserAddresses { + decodedAddress, errDecode := vmf.pubKeyConverter.Decode(address) + if errDecode != nil { + return errDecode + } + vmf.mapOpcodeAddressIsAllowed[managedMultiTransferESDTNFTExecuteByUser][string(decodedAddress)] = struct{}{} + } + + return nil +} + func (vmf *vmContainerFactory) sortWasmVMVersions() { sort.Slice(vmf.wasmVMVersions, func(i, j int) bool { return vmf.wasmVMVersions[i].StartEpoch < vmf.wasmVMVersions[j].StartEpoch @@ -351,6 +386,7 @@ func (vmf *vmContainerFactory) createInProcessWasmVMV15() (vmcommon.VMExecutionH EpochNotifier: vmf.epochNotifier, EnableEpochsHandler: vmf.enableEpochsHandler, Hasher: vmf.hasher, + MapOpcodeAddressIsAllowed: vmf.mapOpcodeAddressIsAllowed, } return wasmVMHost15.NewVMHost(vmf.blockChainHook, hostParameters) diff --git a/process/factory/shard/vmContainerFactory_test.go b/process/factory/shard/vmContainerFactory_test.go index df3ffab673e..1a4c72da3b0 100644 --- a/process/factory/shard/vmContainerFactory_test.go +++ b/process/factory/shard/vmContainerFactory_test.go @@ -1,6 +1,7 @@ package shard import ( + "runtime" "sync" "testing" @@ -31,6 +32,7 @@ func makeVMConfig() config.VirtualMachineConfig { {StartEpoch: 12, Version: "v1.3"}, {StartEpoch: 14, Version: "v1.4"}, }, + TransferAndExecuteByUserAddresses: []string{"erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe3"}, } } @@ -47,6 +49,7 @@ func createMockVMAccountsArguments() 
ArgVMContainerFactory { BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), BlockChainHook: &testscommon.BlockChainHookStub{}, Hasher: &hashingMocks.HasherMock{}, + PubKeyConverter: testscommon.RealWorldBech32PubkeyConverter, } } @@ -128,8 +131,6 @@ func TestNewVMContainerFactory_NilBlockChainHookShouldErr(t *testing.T) { } func TestNewVMContainerFactory_NilHasherShouldErr(t *testing.T) { - t.Parallel() - args := createMockVMAccountsArguments() args.Hasher = nil vmf, err := NewVMContainerFactory(args) @@ -138,8 +139,37 @@ func TestNewVMContainerFactory_NilHasherShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilHasher, err) } +func TestNewVMContainerFactory_NilPubKeyConverterShouldErr(t *testing.T) { + args := createMockVMAccountsArguments() + args.PubKeyConverter = nil + vmf, err := NewVMContainerFactory(args) + + assert.Nil(t, vmf) + assert.Equal(t, process.ErrNilPubkeyConverter, err) +} + +func TestNewVMContainerFactory_EmptyOpcodeAddressListErr(t *testing.T) { + args := createMockVMAccountsArguments() + args.Config.TransferAndExecuteByUserAddresses = nil + vmf, err := NewVMContainerFactory(args) + + assert.Nil(t, vmf) + assert.Equal(t, process.ErrTransferAndExecuteByUserAddressesAreNil, err) +} + +func TestNewVMContainerFactory_WrongAddressErr(t *testing.T) { + args := createMockVMAccountsArguments() + args.Config.TransferAndExecuteByUserAddresses = []string{"just"} + vmf, err := NewVMContainerFactory(args) + + assert.Nil(t, vmf) + assert.Equal(t, err.Error(), "invalid bech32 string length 4") +} + func TestNewVMContainerFactory_OkValues(t *testing.T) { - t.Parallel() + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } args := createMockVMAccountsArguments() vmf, err := NewVMContainerFactory(args) @@ -150,7 +180,9 @@ func TestNewVMContainerFactory_OkValues(t *testing.T) { } func TestVmContainerFactory_Create(t *testing.T) { - t.Parallel() + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } args := createMockVMAccountsArguments() vmf, _ := NewVMContainerFactory(args) @@ -172,9 +204,16 @@ func TestVmContainerFactory_Create(t *testing.T) { acc := vmf.BlockChainHookImpl() assert.NotNil(t, acc) + + assert.Equal(t, len(vmf.mapOpcodeAddressIsAllowed), 1) + assert.Equal(t, len(vmf.mapOpcodeAddressIsAllowed[managedMultiTransferESDTNFTExecuteByUser]), 1) } func TestVmContainerFactory_ResolveWasmVMVersion(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } + epochNotifierInstance := forking.NewGenericEpochNotifier() numCalled := 0 diff --git a/process/headerCheck/common.go b/process/headerCheck/common.go new file mode 100644 index 00000000000..01946580d87 --- /dev/null +++ b/process/headerCheck/common.go @@ -0,0 +1,52 @@ +package headerCheck + +import ( + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) + +// ComputeConsensusGroup will compute the consensus group that assembled the provided block +func ComputeConsensusGroup(header data.HeaderHandler, nodesCoordinator nodesCoordinator.NodesCoordinator) (validatorsGroup []nodesCoordinator.Validator, err error) { + if check.IfNil(header) { + return nil, process.ErrNilHeaderHandler + } + if check.IfNil(nodesCoordinator) { + return nil, process.ErrNilNodesCoordinator + } + + prevRandSeed := header.GetPrevRandSeed() + + // 
TODO: change here with an activation flag if start of epoch block needs to be validated by the new epoch nodes + epoch := header.GetEpoch() + if header.IsStartOfEpochBlock() && epoch > 0 { + epoch = epoch - 1 + } + + return nodesCoordinator.ComputeConsensusGroup(prevRandSeed, header.GetRound(), header.GetShardID(), epoch) +} + +// ComputeSignersPublicKeys will extract from the provided consensus group slice only the strings that matched with the bitmap +func ComputeSignersPublicKeys(consensusGroup []string, bitmap []byte) []string { + nbBitsBitmap := len(bitmap) * 8 + consensusGroupSize := len(consensusGroup) + size := consensusGroupSize + if consensusGroupSize > nbBitsBitmap { + size = nbBitsBitmap + } + + result := make([]string, 0, len(consensusGroup)) + + for i := 0; i < size; i++ { + indexRequired := (bitmap[i/8] & (1 << uint16(i%8))) > 0 + if !indexRequired { + continue + } + + pubKey := consensusGroup[i] + result = append(result, pubKey) + } + + return result +} diff --git a/process/headerCheck/common_test.go b/process/headerCheck/common_test.go new file mode 100644 index 00000000000..0961b7f2a20 --- /dev/null +++ b/process/headerCheck/common_test.go @@ -0,0 +1,187 @@ +package headerCheck + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/stretchr/testify/assert" +) + +func TestComputeConsensusGroup(t *testing.T) { + t.Parallel() + + t.Run("nil header should error", func(t *testing.T) { + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + assert.Fail(t, "should have not called ComputeValidatorsGroupCalled") + return nil, nil + } + + vGroup, err := ComputeConsensusGroup(nil, nodesCoordinatorInstance) + assert.Equal(t, process.ErrNilHeaderHandler, err) + assert.Nil(t, vGroup) + }) + t.Run("nil nodes coordinator should error", func(t *testing.T) { + header := &block.Header{ + Epoch: 1123, + Round: 37373, + Nonce: 38383, + ShardID: 2, + PrevRandSeed: []byte("prev rand seed"), + } + + vGroup, err := ComputeConsensusGroup(header, nil) + assert.Equal(t, process.ErrNilNodesCoordinator, err) + assert.Nil(t, vGroup) + }) + t.Run("should work for a random block", func(t *testing.T) { + header := &block.Header{ + Epoch: 1123, + Round: 37373, + Nonce: 38383, + ShardID: 2, + PrevRandSeed: []byte("prev rand seed"), + } + + validator1, _ := nodesCoordinator.NewValidator([]byte("pk1"), 1, 1) + validator2, _ := nodesCoordinator.NewValidator([]byte("pk2"), 1, 2) + + validatorGroup := []nodesCoordinator.Validator{validator1, validator2} + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + assert.Equal(t, header.PrevRandSeed, randomness) + assert.Equal(t, header.Round, round) + assert.Equal(t, header.ShardID, shardId) + assert.Equal(t, header.Epoch, epoch) + + return validatorGroup, nil + } + + vGroup, err := ComputeConsensusGroup(header, nodesCoordinatorInstance) + assert.Nil(t, err) + 
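// --- Illustrative note, not part of the patch ---
// ComputeSignersPublicKeys above reads the bitmap least-significant bit first within each byte.
// A tiny standalone sketch of the same bit test, using a made-up single-byte bitmap:
//
//	bitmap := []byte{0b00110101} // bits 0, 2, 4 and 5 are set
//	selected := make([]int, 0)
//	for i := 0; i < len(bitmap)*8; i++ {
//		if bitmap[i/8]&(1<<uint16(i%8)) > 0 {
//			selected = append(selected, i)
//		}
//	}
//	// selected == [0 2 4 5], matching the first byte of the 16-validator case tested below
// --- end note ---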
assert.Equal(t, validatorGroup, vGroup) + }) + t.Run("should work for a start of epoch block", func(t *testing.T) { + header := &block.Header{ + Epoch: 1123, + Round: 37373, + Nonce: 38383, + ShardID: 2, + PrevRandSeed: []byte("prev rand seed"), + EpochStartMetaHash: []byte("epoch start metahash"), + } + + validator1, _ := nodesCoordinator.NewValidator([]byte("pk1"), 1, 1) + validator2, _ := nodesCoordinator.NewValidator([]byte("pk2"), 1, 2) + + validatorGroup := []nodesCoordinator.Validator{validator1, validator2} + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + assert.Equal(t, header.PrevRandSeed, randomness) + assert.Equal(t, header.Round, round) + assert.Equal(t, header.ShardID, shardId) + assert.Equal(t, header.Epoch-1, epoch) + + return validatorGroup, nil + } + + vGroup, err := ComputeConsensusGroup(header, nodesCoordinatorInstance) + assert.Nil(t, err) + assert.Equal(t, validatorGroup, vGroup) + }) +} + +func generatePubKeys(num int) []string { + consensusGroup := make([]string, 0, num) + for i := 0; i < num; i++ { + consensusGroup = append(consensusGroup, fmt.Sprintf("pub key %d", i)) + } + + return consensusGroup +} + +func TestComputeSignersPublicKeys(t *testing.T) { + t.Parallel() + + t.Run("should compute with 16 validators", func(t *testing.T) { + t.Parallel() + + consensusGroup := generatePubKeys(16) + mask0 := byte(0b00110101) + mask1 := byte(0b01001101) + + result := ComputeSignersPublicKeys(consensusGroup, []byte{mask0, mask1}) + expected := []string{ + "pub key 0", + "pub key 2", + "pub key 4", + "pub key 5", + + "pub key 8", + "pub key 10", + "pub key 11", + "pub key 14", + } + + assert.Equal(t, expected, result) + }) + t.Run("should compute with 14 validators", func(t *testing.T) { + t.Parallel() + + consensusGroup := generatePubKeys(14) + mask0 := byte(0b00110101) + mask1 := byte(0b00001101) + + result := ComputeSignersPublicKeys(consensusGroup, []byte{mask0, mask1}) + expected := []string{ + "pub key 0", + "pub key 2", + "pub key 4", + "pub key 5", + + "pub key 8", + "pub key 10", + "pub key 11", + } + + assert.Equal(t, expected, result) + }) + t.Run("should compute with 14 validators, mask is 0", func(t *testing.T) { + t.Parallel() + + consensusGroup := generatePubKeys(14) + mask0 := byte(0b00000000) + mask1 := byte(0b00000000) + + result := ComputeSignersPublicKeys(consensusGroup, []byte{mask0, mask1}) + expected := make([]string, 0) + + assert.Equal(t, expected, result) + }) + t.Run("should compute with 14 validators, mask contains all bits set", func(t *testing.T) { + t.Parallel() + + consensusGroup := generatePubKeys(14) + mask0 := byte(0b11111111) + mask1 := byte(0b00111111) + + result := ComputeSignersPublicKeys(consensusGroup, []byte{mask0, mask1}) + + assert.Equal(t, consensusGroup, result) + }) + t.Run("should compute with 17 validators, mask contains 2 bytes", func(t *testing.T) { + t.Parallel() + + consensusGroup := generatePubKeys(17) + mask0 := byte(0b11111111) + mask1 := byte(0b11111111) + + result := ComputeSignersPublicKeys(consensusGroup, []byte{mask0, mask1}) + expected := generatePubKeys(16) + assert.Equal(t, expected, result) + }) +} diff --git a/process/headerCheck/headerSignatureVerify.go b/process/headerCheck/headerSignatureVerify.go index 999bc82e881..308af919366 100644 --- a/process/headerCheck/headerSignatureVerify.go +++ 
b/process/headerCheck/headerSignatureVerify.go @@ -30,7 +30,7 @@ type ArgsHeaderSigVerifier struct { FallbackHeaderValidator process.FallbackHeaderValidator } -//HeaderSigVerifier is component used to check if a header is valid +// HeaderSigVerifier is component used to check if a header is valid type HeaderSigVerifier struct { marshalizer marshal.Marshalizer hasher hashing.Hasher @@ -301,15 +301,7 @@ func (hsv *HeaderSigVerifier) verifyLeaderSignature(leaderPubKey crypto.PublicKe } func (hsv *HeaderSigVerifier) getLeader(header data.HeaderHandler) (crypto.PublicKey, error) { - prevRandSeed := header.GetPrevRandSeed() - - // TODO: remove if start of epoch block needs to be validated by the new epoch nodes - epoch := header.GetEpoch() - if header.IsStartOfEpochBlock() && epoch > 0 { - epoch = epoch - 1 - } - - headerConsensusGroup, err := hsv.nodesCoordinator.ComputeConsensusGroup(prevRandSeed, header.GetRound(), header.GetShardID(), epoch) + headerConsensusGroup, err := ComputeConsensusGroup(header, hsv.nodesCoordinator) if err != nil { return nil, err } diff --git a/process/interceptors/factory/argInterceptedDataFactory.go b/process/interceptors/factory/argInterceptedDataFactory.go index 37701a92f7a..36ab4968375 100644 --- a/process/interceptors/factory/argInterceptedDataFactory.go +++ b/process/interceptors/factory/argInterceptedDataFactory.go @@ -57,4 +57,5 @@ type ArgInterceptedDataFactory struct { SignaturesHandler process.SignaturesHandler HeartbeatExpiryTimespanInSec int64 PeerID core.PeerID + RelayedTxV3Processor process.RelayedTxV3Processor } diff --git a/process/interceptors/factory/interceptedMetaHeaderDataFactory_test.go b/process/interceptors/factory/interceptedMetaHeaderDataFactory_test.go index 0912de698c1..d2ecc63e59d 100644 --- a/process/interceptors/factory/interceptedMetaHeaderDataFactory_test.go +++ b/process/interceptors/factory/interceptedMetaHeaderDataFactory_test.go @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + testProcessMocks "github.com/multiversx/mx-chain-go/testscommon/processMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/stretchr/testify/assert" ) @@ -101,11 +102,12 @@ func createMockArgument( ValidityAttester: &mock.ValidityAttesterStub{}, HeaderIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, EpochStartTrigger: &mock.EpochStartTriggerStub{}, - ArgsParser: &mock.ArgumentParserMock{}, + ArgsParser: &testscommon.ArgumentParserMock{}, PeerSignatureHandler: &processMocks.PeerSignatureHandlerStub{}, SignaturesHandler: &processMocks.SignaturesHandlerStub{}, HeartbeatExpiryTimespanInSec: 30, PeerID: "pid", + RelayedTxV3Processor: &testProcessMocks.RelayedTxV3ProcessorMock{}, } } diff --git a/process/interceptors/factory/interceptedTxDataFactory.go b/process/interceptors/factory/interceptedTxDataFactory.go index 563997c5066..e2dc86e599c 100644 --- a/process/interceptors/factory/interceptedTxDataFactory.go +++ b/process/interceptors/factory/interceptedTxDataFactory.go @@ -31,6 +31,7 @@ type interceptedTxDataFactory struct { txSignHasher hashing.Hasher txVersionChecker process.TxVersionCheckerHandler enableEpochsHandler common.EnableEpochsHandler + relayedTxV3Processor process.RelayedTxV3Processor } // NewInterceptedTxDataFactory creates an instance of 
interceptedTxDataFactory @@ -107,6 +108,7 @@ func NewInterceptedTxDataFactory(argument *ArgInterceptedDataFactory) (*intercep txSignHasher: argument.CoreComponents.TxSignHasher(), txVersionChecker: argument.CoreComponents.TxVersionChecker(), enableEpochsHandler: argument.CoreComponents.EnableEpochsHandler(), + relayedTxV3Processor: argument.RelayedTxV3Processor, } return itdf, nil @@ -130,6 +132,8 @@ func (itdf *interceptedTxDataFactory) Create(buff []byte) (process.InterceptedDa itdf.enableEpochsHandler.IsFlagEnabled(common.TransactionSignedWithTxHashFlag), itdf.txSignHasher, itdf.txVersionChecker, + itdf.enableEpochsHandler, + itdf.relayedTxV3Processor, ) } diff --git a/process/interface.go b/process/interface.go index ee86ee3302c..8e943d0a44e 100644 --- a/process/interface.go +++ b/process/interface.go @@ -20,6 +20,9 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" crypto "github.com/multiversx/mx-chain-crypto-go" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-vm-common-go/parsers" + "github.com/multiversx/mx-chain-go/common" cryptoCommon "github.com/multiversx/mx-chain-go/common/crypto" "github.com/multiversx/mx-chain-go/epochStart" @@ -30,8 +33,6 @@ import ( "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/multiversx/mx-chain-vm-common-go/parsers" ) // TransactionProcessor is the main interface for transaction execution engine @@ -170,7 +171,7 @@ type TransactionCoordinator interface { VerifyCreatedBlockTransactions(hdr data.HeaderHandler, body *block.Body) error GetCreatedInShardMiniBlocks() []*block.MiniBlock VerifyCreatedMiniBlocks(hdr data.HeaderHandler, body *block.Body) error - AddIntermediateTransactions(mapSCRs map[block.Type][]data.TransactionHandler) error + AddIntermediateTransactions(mapSCRs map[block.Type][]data.TransactionHandler, key []byte) error GetAllIntermediateTxs() map[block.Type]map[string]data.TransactionHandler AddTxsFromMiniBlocks(miniBlocks block.MiniBlockSlice) AddTransactions(txHandlers []data.TransactionHandler, blockType block.Type) @@ -190,7 +191,7 @@ type SmartContractProcessor interface { // IntermediateTransactionHandler handles transactions which are not resolved in only one step type IntermediateTransactionHandler interface { - AddIntermediateTransactions(txs []data.TransactionHandler) error + AddIntermediateTransactions(txs []data.TransactionHandler, key []byte) error GetNumOfCrossInterMbsAndTxs() (int, int) CreateAllInterMiniBlocks() []*block.MiniBlock VerifyInterMiniBlocks(body *block.Body) error @@ -199,7 +200,7 @@ type IntermediateTransactionHandler interface { CreateBlockStarted() GetCreatedInShardMiniBlock() *block.MiniBlock RemoveProcessedResults(key []byte) [][]byte - InitProcessedResults(key []byte) + InitProcessedResults(key []byte, parentKey []byte) IsInterfaceNil() bool } @@ -287,9 +288,9 @@ type ValidatorStatisticsProcessor interface { Process(shardValidatorInfo data.ShardValidatorInfoHandler) error IsInterfaceNil() bool RootHash() ([]byte, error) - ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error - 
GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error + ResetValidatorStatisticsAtNewEpoch(vInfos state.ShardValidatorsInfoMapHandler) error + GetValidatorInfoForRootHash(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) + ProcessRatingsEndOfEpoch(validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32) error Commit() ([]byte, error) DisplayRatings(epoch uint32) SetLastFinalizedRootHash([]byte) @@ -318,6 +319,8 @@ type TransactionLogProcessorDatabase interface { // ValidatorsProvider is the main interface for validators' provider type ValidatorsProvider interface { GetLatestValidators() map[string]*validator.ValidatorStatistics + GetAuctionList() ([]*common.AuctionListValidatorAPIResponse, error) + ForceUpdate() error IsInterfaceNil() bool Close() error } @@ -696,6 +699,7 @@ type feeHandler interface { ComputeGasLimitInEpoch(tx data.TransactionWithFeeHandler, epoch uint32) uint64 ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsedInEpoch(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int + ComputeRelayedTxFees(tx data.TransactionWithFeeHandler) (*big.Int, *big.Int, error) } // TxGasHandler handles a transaction gas and gas cost @@ -722,6 +726,7 @@ type EconomicsDataHandler interface { rewardsHandler feeHandler SetStatusHandler(statusHandler core.AppStatusHandler) error + SetTxTypeHandler(txTypeHandler TxTypeHandler) error IsInterfaceNil() bool } @@ -945,10 +950,10 @@ type EpochStartDataCreator interface { // RewardsCreator defines the functionality for the metachain to create rewards at end of epoch type RewardsCreator interface { CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error GetProtocolSustainabilityRewards() *big.Int GetLocalTxCache() epochStart.TransactionCacher @@ -962,8 +967,8 @@ type RewardsCreator interface { // EpochStartValidatorInfoCreator defines the functionality for the metachain to create validator statistics at end of epoch type EpochStartValidatorInfoCreator interface { - CreateValidatorInfoMiniBlocks(validatorInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) - VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error + CreateValidatorInfoMiniBlocks(validatorInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) + VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo state.ShardValidatorsInfoMapHandler) error GetLocalValidatorInfoCache() epochStart.ValidatorInfoCacher CreateMarshalledData(body *block.Body) map[string][][]byte GetValidatorInfoTxs(body *block.Body) map[string]*state.ShardValidatorInfo @@ -975,7 +980,10 @@ type EpochStartValidatorInfoCreator interface { // EpochStartSystemSCProcessor defines the functionality for the metachain to process 
system smart contract and end of epoch type EpochStartSystemSCProcessor interface { - ProcessSystemSmartContract(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error + ProcessSystemSmartContract( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, + ) error ProcessDelegationRewards( miniBlocks block.MiniBlockSlice, rewardTxs epochStart.TransactionCacher, @@ -1314,7 +1322,7 @@ type TxsSenderHandler interface { // PreProcessorExecutionInfoHandler handles pre processor execution info needed by the transactions preprocessors type PreProcessorExecutionInfoHandler interface { GetNumOfCrossInterMbsAndTxs() (int, int) - InitProcessedTxsResults(key []byte) + InitProcessedTxsResults(key []byte, parentKey []byte) RevertProcessedTxsResults(txHashes [][]byte, key []byte) } @@ -1345,3 +1353,25 @@ type Debugger interface { Close() error IsInterfaceNil() bool } + +// SentSignaturesTracker defines a component able to handle sent signature from self +type SentSignaturesTracker interface { + StartRound() + SignatureSent(pkBytes []byte) + ResetCountersForManagedBlockSigner(signerPk []byte) + IsInterfaceNil() bool +} + +// RelayedTxV3Processor defines a component able to check and process relayed transactions v3 +type RelayedTxV3Processor interface { + CheckRelayedTx(tx *transaction.Transaction) error + IsInterfaceNil() bool +} + +// FailedTxLogsAccumulator defines a component able to accumulate logs during a relayed tx execution +type FailedTxLogsAccumulator interface { + GetLogs(txHash []byte) (data.TransactionHandler, []*vmcommon.LogEntry, bool) + SaveLogs(txHash []byte, tx data.TransactionHandler, logs []*vmcommon.LogEntry) error + Remove(txHash []byte) + IsInterfaceNil() bool +} diff --git a/process/mock/argumentsParserMock.go b/process/mock/argumentsParserMock.go deleted file mode 100644 index 02ce8f408ae..00000000000 --- a/process/mock/argumentsParserMock.go +++ /dev/null @@ -1,60 +0,0 @@ -package mock - -import ( - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/multiversx/mx-chain-vm-common-go/parsers" -) - -// ArgumentParserMock - -type ArgumentParserMock struct { - ParseCallDataCalled func(data string) (string, [][]byte, error) - ParseArgumentsCalled func(data string) ([][]byte, error) - ParseDeployDataCalled func(data string) (*parsers.DeployArgs, error) - CreateDataFromStorageUpdateCalled func(storageUpdates []*vmcommon.StorageUpdate) string - GetStorageUpdatesCalled func(data string) ([]*vmcommon.StorageUpdate, error) -} - -// ParseCallData - -func (ap *ArgumentParserMock) ParseCallData(data string) (string, [][]byte, error) { - if ap.ParseCallDataCalled == nil { - return "", nil, nil - } - return ap.ParseCallDataCalled(data) -} - -// ParseArguments - -func (ap *ArgumentParserMock) ParseArguments(data string) ([][]byte, error) { - if ap.ParseArgumentsCalled == nil { - return [][]byte{}, nil - } - return ap.ParseArgumentsCalled(data) -} - -// ParseDeployData - -func (ap *ArgumentParserMock) ParseDeployData(data string) (*parsers.DeployArgs, error) { - if ap.ParseDeployDataCalled == nil { - return nil, nil - } - return ap.ParseDeployDataCalled(data) -} - -// CreateDataFromStorageUpdate - -func (ap *ArgumentParserMock) CreateDataFromStorageUpdate(storageUpdates []*vmcommon.StorageUpdate) string { - if ap.CreateDataFromStorageUpdateCalled == nil { - return "" - } - return ap.CreateDataFromStorageUpdateCalled(storageUpdates) -} - -// GetStorageUpdates - -func (ap *ArgumentParserMock) 
GetStorageUpdates(data string) ([]*vmcommon.StorageUpdate, error) { - if ap.GetStorageUpdatesCalled == nil { - return nil, nil - } - return ap.GetStorageUpdatesCalled(data) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (ap *ArgumentParserMock) IsInterfaceNil() bool { - return ap == nil -} diff --git a/process/mock/builtInCostHandlerStub.go b/process/mock/builtInCostHandlerStub.go deleted file mode 100644 index 4ee3b23b062..00000000000 --- a/process/mock/builtInCostHandlerStub.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { -} - -// ComputeBuiltInCost - -func (b *BuiltInCostHandlerStub) ComputeBuiltInCost(_ data.TransactionWithFeeHandler) uint64 { - return 1 -} - -// IsBuiltInFuncCall - -func (b *BuiltInCostHandlerStub) IsBuiltInFuncCall(_ data.TransactionWithFeeHandler) bool { - return false -} - -// IsInterfaceNil - -func (b *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return b == nil -} diff --git a/process/mock/epochEconomicsStub.go b/process/mock/epochEconomicsStub.go index 99e8b0dd359..7a65f7c3fcf 100644 --- a/process/mock/epochEconomicsStub.go +++ b/process/mock/epochEconomicsStub.go @@ -19,7 +19,9 @@ func (e *EpochEconomicsStub) ComputeEndOfEpochEconomics(metaBlock *block.MetaBlo if e.ComputeEndOfEpochEconomicsCalled != nil { return e.ComputeEndOfEpochEconomicsCalled(metaBlock) } - return &block.Economics{}, nil + return &block.Economics{ + RewardsForProtocolSustainability: big.NewInt(0), + }, nil } // VerifyRewardsPerBlock - diff --git a/process/mock/epochRewardsCreatorStub.go b/process/mock/epochRewardsCreatorStub.go deleted file mode 100644 index ce17c1e636a..00000000000 --- a/process/mock/epochRewardsCreatorStub.go +++ /dev/null @@ -1,109 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochRewardsCreatorStub - -type EpochRewardsCreatorStub struct { - CreateRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) (block.MiniBlockSlice, error) - VerifyRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) error - CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte - SaveBlockDataToStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - DeleteBlockDataFromStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - RemoveBlockDataFromPoolsCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - GetRewardsTxsCalled func(body *block.Body) map[string]data.TransactionHandler - GetProtocolSustainCalled func() *big.Int - GetLocalTxCacheCalled func() epochStart.TransactionCacher -} - -// GetProtocolSustainabilityRewards - -func (e *EpochRewardsCreatorStub) GetProtocolSustainabilityRewards() *big.Int { - if e.GetProtocolSustainCalled != nil { - return e.GetProtocolSustainCalled() - } - return big.NewInt(0) -} - -// GetLocalTxCache - -func (e *EpochRewardsCreatorStub) GetLocalTxCache() epochStart.TransactionCacher { - if e.GetLocalTxCacheCalled != nil { - return 
e.GetLocalTxCacheCalled() - } - return &TxForCurrentBlockStub{} -} - -// CreateRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) (block.MiniBlockSlice, error) { - if e.CreateRewardsMiniBlocksCalled != nil { - return e.CreateRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil, nil -} - -// VerifyRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) error { - if e.VerifyRewardsMiniBlocksCalled != nil { - return e.VerifyRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil -} - -// CreateMarshalledData - -func (e *EpochRewardsCreatorStub) CreateMarshalledData(body *block.Body) map[string][][]byte { - if e.CreateMarshalledDataCalled != nil { - return e.CreateMarshalledDataCalled(body) - } - return nil -} - -// GetRewardsTxs - -func (e *EpochRewardsCreatorStub) GetRewardsTxs(body *block.Body) map[string]data.TransactionHandler { - if e.GetRewardsTxsCalled != nil { - return e.GetRewardsTxsCalled(body) - } - return nil -} - -// SaveBlockDataToStorage - -func (e *EpochRewardsCreatorStub) SaveBlockDataToStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.SaveBlockDataToStorageCalled != nil { - e.SaveBlockDataToStorageCalled(metaBlock, body) - } -} - -// DeleteBlockDataFromStorage - -func (e *EpochRewardsCreatorStub) DeleteBlockDataFromStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.DeleteBlockDataFromStorageCalled != nil { - e.DeleteBlockDataFromStorageCalled(metaBlock, body) - } -} - -// IsInterfaceNil - -func (e *EpochRewardsCreatorStub) IsInterfaceNil() bool { - return e == nil -} - -// RemoveBlockDataFromPools - -func (e *EpochRewardsCreatorStub) RemoveBlockDataFromPools(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.RemoveBlockDataFromPoolsCalled != nil { - e.RemoveBlockDataFromPoolsCalled(metaBlock, body) - } -} diff --git a/process/mock/epochStartDataCreatorStub.go b/process/mock/epochStartDataCreatorStub.go index 1cbfccaec5b..dd38c5a1198 100644 --- a/process/mock/epochStartDataCreatorStub.go +++ b/process/mock/epochStartDataCreatorStub.go @@ -1,6 +1,9 @@ package mock -import "github.com/multiversx/mx-chain-core-go/data/block" +import ( + "github.com/multiversx/mx-chain-core-go/data/block" + "math/big" +) // EpochStartDataCreatorStub - type EpochStartDataCreatorStub struct { @@ -13,7 +16,11 @@ func (e *EpochStartDataCreatorStub) CreateEpochStartData() (*block.EpochStart, e if e.CreateEpochStartDataCalled != nil { return e.CreateEpochStartDataCalled() } - return &block.EpochStart{}, nil + return &block.EpochStart{ + LastFinalizedHeaders: []block.EpochStartShardData{{}}, + Economics: block.Economics{ + RewardsForProtocolSustainability: big.NewInt(0)}, + }, nil } // VerifyEpochStartDataForMetablock - diff --git a/process/mock/epochStartSystemSCStub.go b/process/mock/epochStartSystemSCStub.go deleted file mode 100644 index fd2c92553cf..00000000000 --- a/process/mock/epochStartSystemSCStub.go +++ /dev/null @@ -1,46 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - 
"github.com/multiversx/mx-chain-go/state" -) - -// EpochStartSystemSCStub - -type EpochStartSystemSCStub struct { - ProcessSystemSmartContractCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error - ProcessDelegationRewardsCalled func(miniBlocks block.MiniBlockSlice, txCache epochStart.TransactionCacher) error - ToggleUnStakeUnBondCalled func(value bool) error -} - -// ToggleUnStakeUnBond - -func (e *EpochStartSystemSCStub) ToggleUnStakeUnBond(value bool) error { - if e.ToggleUnStakeUnBondCalled != nil { - return e.ToggleUnStakeUnBondCalled(value) - } - return nil -} - -// ProcessSystemSmartContract - -func (e *EpochStartSystemSCStub) ProcessSystemSmartContract(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { - if e.ProcessSystemSmartContractCalled != nil { - return e.ProcessSystemSmartContractCalled(validatorInfos, nonce, epoch) - } - return nil -} - -// ProcessDelegationRewards - -func (e *EpochStartSystemSCStub) ProcessDelegationRewards( - miniBlocks block.MiniBlockSlice, - txCache epochStart.TransactionCacher, -) error { - if e.ProcessDelegationRewardsCalled != nil { - return e.ProcessDelegationRewardsCalled(miniBlocks, txCache) - } - return nil -} - -// IsInterfaceNil - -func (e *EpochStartSystemSCStub) IsInterfaceNil() bool { - return e == nil -} diff --git a/process/mock/intermProcessorStub.go b/process/mock/intermProcessorStub.go index 3909bfd83fc..dde08776bd4 100644 --- a/process/mock/intermProcessorStub.go +++ b/process/mock/intermProcessorStub.go @@ -7,7 +7,7 @@ import ( // IntermediateTransactionHandlerStub - type IntermediateTransactionHandlerStub struct { - AddIntermediateTransactionsCalled func(txs []data.TransactionHandler) error + AddIntermediateTransactionsCalled func(txs []data.TransactionHandler, key []byte) error GetNumOfCrossInterMbsAndTxsCalled func() (int, int) CreateAllInterMiniBlocksCalled func() []*block.MiniBlock VerifyInterMiniBlocksCalled func(body *block.Body) error @@ -16,7 +16,7 @@ type IntermediateTransactionHandlerStub struct { CreateMarshalledDataCalled func(txHashes [][]byte) ([][]byte, error) GetAllCurrentFinishedTxsCalled func() map[string]data.TransactionHandler RemoveProcessedResultsCalled func(key []byte) [][]byte - InitProcessedResultsCalled func(key []byte) + InitProcessedResultsCalled func(key []byte, parentKey []byte) intermediateTransactions []data.TransactionHandler } @@ -29,9 +29,9 @@ func (ith *IntermediateTransactionHandlerStub) RemoveProcessedResults(key []byte } // InitProcessedResults - -func (ith *IntermediateTransactionHandlerStub) InitProcessedResults(key []byte) { +func (ith *IntermediateTransactionHandlerStub) InitProcessedResults(key []byte, parentKey []byte) { if ith.InitProcessedResultsCalled != nil { - ith.InitProcessedResultsCalled(key) + ith.InitProcessedResultsCalled(key, parentKey) } } @@ -44,12 +44,12 @@ func (ith *IntermediateTransactionHandlerStub) CreateMarshalledData(txHashes [][ } // AddIntermediateTransactions - -func (ith *IntermediateTransactionHandlerStub) AddIntermediateTransactions(txs []data.TransactionHandler) error { +func (ith *IntermediateTransactionHandlerStub) AddIntermediateTransactions(txs []data.TransactionHandler, key []byte) error { if ith.AddIntermediateTransactionsCalled == nil { ith.intermediateTransactions = append(ith.intermediateTransactions, txs...) 
return nil } - return ith.AddIntermediateTransactionsCalled(txs) + return ith.AddIntermediateTransactionsCalled(txs, key) } // GetIntermediateTransactions - diff --git a/process/mock/intermediateTransactionHandlerMock.go b/process/mock/intermediateTransactionHandlerMock.go index a7d0a5b3be6..7bd71c3475c 100644 --- a/process/mock/intermediateTransactionHandlerMock.go +++ b/process/mock/intermediateTransactionHandlerMock.go @@ -7,7 +7,7 @@ import ( // IntermediateTransactionHandlerMock - type IntermediateTransactionHandlerMock struct { - AddIntermediateTransactionsCalled func(txs []data.TransactionHandler) error + AddIntermediateTransactionsCalled func(txs []data.TransactionHandler, key []byte) error GetNumOfCrossInterMbsAndTxsCalled func() (int, int) CreateAllInterMiniBlocksCalled func() []*block.MiniBlock VerifyInterMiniBlocksCalled func(body *block.Body) error @@ -16,7 +16,7 @@ type IntermediateTransactionHandlerMock struct { CreateMarshalledDataCalled func(txHashes [][]byte) ([][]byte, error) GetAllCurrentFinishedTxsCalled func() map[string]data.TransactionHandler RemoveProcessedResultsCalled func(key []byte) [][]byte - InitProcessedResultsCalled func(key []byte) + InitProcessedResultsCalled func(key []byte, parentKey []byte) GetCreatedInShardMiniBlockCalled func() *block.MiniBlock intermediateTransactions []data.TransactionHandler } @@ -30,9 +30,9 @@ func (ith *IntermediateTransactionHandlerMock) RemoveProcessedResults(key []byte } // InitProcessedResults - -func (ith *IntermediateTransactionHandlerMock) InitProcessedResults(key []byte) { +func (ith *IntermediateTransactionHandlerMock) InitProcessedResults(key []byte, parentKey []byte) { if ith.InitProcessedResultsCalled != nil { - ith.InitProcessedResultsCalled(key) + ith.InitProcessedResultsCalled(key, parentKey) } } @@ -45,12 +45,12 @@ func (ith *IntermediateTransactionHandlerMock) CreateMarshalledData(txHashes [][ } // AddIntermediateTransactions - -func (ith *IntermediateTransactionHandlerMock) AddIntermediateTransactions(txs []data.TransactionHandler) error { +func (ith *IntermediateTransactionHandlerMock) AddIntermediateTransactions(txs []data.TransactionHandler, key []byte) error { if ith.AddIntermediateTransactionsCalled == nil { ith.intermediateTransactions = append(ith.intermediateTransactions, txs...) 
return nil } - return ith.AddIntermediateTransactionsCalled(txs) + return ith.AddIntermediateTransactionsCalled(txs, key) } // GetIntermediateTransactions - diff --git a/process/mock/nodesSetupStub.go b/process/mock/nodesSetupStub.go deleted file mode 100644 index b8e21ce5fcb..00000000000 --- a/process/mock/nodesSetupStub.go +++ /dev/null @@ -1,188 +0,0 @@ -package mock - -import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - GetAdaptivityCalled func() bool - GetHysteresisCalled func() float32 - GetShardIDForPubKeyCalled func(pubkey []byte) (uint32, error) - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - InitialNodesPubKeysCalled func() map[uint32][]string - MinNumberOfMetaNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - MinNumberOfNodesWithHysteresisCalled func() uint32 - MinShardHysteresisNodesCalled func() uint32 - MinMetaHysteresisNodesCalled func() uint32 -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 1 -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 0 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 0 -} - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 0 -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 0 -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - return nil, nil -} - -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() - } - return nil -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() 
bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - - return false -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - - return 0 -} - -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) - } - return 0, nil -} - -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) - } - - return []string{"val1", "val2"}, nil -} - -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() - } - - return map[uint32][]string{0: {"val1", "val2"}} -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - - return 1 -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - - return 1 -} - -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() - } - return n.MinNumberOfNodes() -} - -// MinShardHysteresisNodes - -func (n *NodesSetupStub) MinShardHysteresisNodes() uint32 { - if n.MinShardHysteresisNodesCalled != nil { - return n.MinShardHysteresisNodesCalled() - } - return 1 -} - -// MinMetaHysteresisNodes - -func (n *NodesSetupStub) MinMetaHysteresisNodes() uint32 { - if n.MinMetaHysteresisNodesCalled != nil { - return n.MinMetaHysteresisNodesCalled() - } - return 1 -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/process/mock/transactionSimulatorStub.go b/process/mock/transactionSimulatorStub.go index 70363230936..971cda66d04 100644 --- a/process/mock/transactionSimulatorStub.go +++ b/process/mock/transactionSimulatorStub.go @@ -1,19 +1,20 @@ package mock import ( + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" ) // TransactionSimulatorStub - type TransactionSimulatorStub struct { - ProcessTxCalled func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) + ProcessTxCalled func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) } // ProcessTx - -func (tss *TransactionSimulatorStub) ProcessTx(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { +func (tss *TransactionSimulatorStub) ProcessTx(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { if tss.ProcessTxCalled != nil { - return tss.ProcessTxCalled(tx) + return tss.ProcessTxCalled(tx, currentHeader) } return nil, nil diff --git a/process/mock/validatorsProviderStub.go b/process/mock/validatorsProviderStub.go deleted file mode 100644 index 98ea652340b..00000000000 
--- a/process/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/validator" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics -} - -// GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/process/peer/interface.go b/process/peer/interface.go index 94377bfdd53..2a8a447e694 100644 --- a/process/peer/interface.go +++ b/process/peer/interface.go @@ -2,6 +2,8 @@ package peer import ( "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" ) // DataPool indicates the main functionality needed in order to fetch the required blocks from the pool @@ -9,3 +11,12 @@ type DataPool interface { Headers() dataRetriever.HeadersPool IsInterfaceNil() bool } + +// StakingDataProviderAPI is able to provide staking data from the system smart contracts +type StakingDataProviderAPI interface { + ComputeUnQualifiedNodes(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) + FillValidatorInfo(validator state.ValidatorInfoHandler) error + GetOwnersData() map[string]*epochStart.OwnerData + Clean() + IsInterfaceNil() bool +} diff --git a/process/peer/process.go b/process/peer/process.go index 5c3364fe5f7..20e0b377365 100644 --- a/process/peer/process.go +++ b/process/peer/process.go @@ -13,8 +13,11 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/marshal" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/common/validatorInfo" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" @@ -23,7 +26,6 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/state/parsers" - logger "github.com/multiversx/mx-chain-logger-go" ) var log = logger.GetOrCreate("process/peer") @@ -196,6 +198,18 @@ func (vs *validatorStatistics) saveNodesCoordinatorUpdates(epoch uint32) (bool, } nodeForcedToRemain = nodeForcedToRemain || tmpNodeForcedToRemain + if vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + nodesMap, err = vs.nodesCoordinator.GetShuffledOutToAuctionValidatorsPublicKeys(epoch) + if err != nil { + return false, err + } + + _, err = vs.saveUpdatesForNodesMap(nodesMap, common.AuctionList) + if err != nil { + return false, err + } + } + return nodeForcedToRemain, nil } @@ -238,12 +252,16 @@ func (vs *validatorStatistics) saveUpdatesForList( 
isNodeLeaving := (peerType == common.WaitingList || peerType == common.EligibleList) && peerAcc.GetList() == string(common.LeavingList) isNodeWithLowRating := vs.isValidatorWithLowRating(peerAcc) isNodeJailed := vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) && peerType == common.InactiveList && isNodeWithLowRating + isStakingV4Started := vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) if isNodeJailed { - peerAcc.SetListAndIndex(shardID, string(common.JailedList), uint32(index)) + peerAcc.SetListAndIndex(shardID, string(common.JailedList), uint32(index), isStakingV4Started) } else if isNodeLeaving { - peerAcc.SetListAndIndex(shardID, string(common.LeavingList), uint32(index)) + peerAcc.SetListAndIndex(shardID, string(common.LeavingList), uint32(index), isStakingV4Started) + if isStakingV4Started { + peerAcc.SetPreviousList(string(peerType)) + } } else { - peerAcc.SetListAndIndex(shardID, string(peerType), uint32(index)) + peerAcc.SetListAndIndex(shardID, string(peerType), uint32(index), isStakingV4Started) } err = vs.peerAdapter.SaveAccount(peerAcc) @@ -444,23 +462,19 @@ func (vs *validatorStatistics) RootHash() ([]byte, error) { func (vs *validatorStatistics) getValidatorDataFromLeaves( leavesChannels *common.TrieIteratorChannels, -) (map[uint32][]*state.ValidatorInfo, error) { - - validators := make(map[uint32][]*state.ValidatorInfo, vs.shardCoordinator.NumberOfShards()+1) - for i := uint32(0); i < vs.shardCoordinator.NumberOfShards(); i++ { - validators[i] = make([]*state.ValidatorInfo, 0) - } - validators[core.MetachainShardId] = make([]*state.ValidatorInfo, 0) - +) (state.ShardValidatorsInfoMapHandler, error) { + validators := state.NewShardValidatorsInfoMap() for pa := range leavesChannels.LeavesChan { peerAccount, err := vs.unmarshalPeer(pa) if err != nil { return nil, err } - currentShardId := peerAccount.GetShardId() validatorInfoData := vs.PeerAccountToValidatorInfo(peerAccount) - validators[currentShardId] = append(validators[currentShardId], validatorInfoData) + err = validators.Add(validatorInfoData) + if err != nil { + return nil, err + } } err := leavesChannels.ErrChan.ReadFromChanNonBlocking() @@ -503,7 +517,9 @@ func (vs *validatorStatistics) PeerAccountToValidatorInfo(peerAccount state.Peer PublicKey: peerAccount.AddressBytes(), ShardId: peerAccount.GetShardId(), List: list, + PreviousList: peerAccount.GetPreviousList(), Index: peerAccount.GetIndexInList(), + PreviousIndex: peerAccount.GetPreviousIndexInList(), TempRating: peerAccount.GetTempRating(), Rating: peerAccount.GetRating(), RatingModifier: ratingModifier, @@ -555,7 +571,7 @@ func (vs *validatorStatistics) jailValidatorIfBadRatingAndInactive(validatorAcco return } - validatorAccount.SetListAndIndex(validatorAccount.GetShardId(), string(common.JailedList), validatorAccount.GetIndexInList()) + validatorAccount.SetListAndIndex(validatorAccount.GetShardId(), string(common.JailedList), validatorAccount.GetIndexInList(), vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) } func (vs *validatorStatistics) unmarshalPeer(peerAccountData core.KeyValueHolder) (state.PeerAccountHandler, error) { @@ -571,7 +587,7 @@ func (vs *validatorStatistics) unmarshalPeer(peerAccountData core.KeyValueHolder } // GetValidatorInfoForRootHash returns all the peer accounts from the trie with the given rootHash -func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { +func (vs *validatorStatistics) 
GetValidatorInfoForRootHash(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { sw := core.NewStopWatch() sw.Start("GetValidatorInfoForRootHash") defer func() { @@ -598,10 +614,10 @@ func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (map // ProcessRatingsEndOfEpoch makes end of epoch process on the rating func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( - validatorInfos map[uint32][]*state.ValidatorInfo, + validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32, ) error { - if len(validatorInfos) == 0 { + if validatorInfos == nil || len(validatorInfos.GetAllValidatorsInfo()) == 0 { return process.ErrNilValidatorInfos } @@ -610,14 +626,14 @@ func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( } signedThreshold := vs.rater.GetSignedBlocksThreshold() - for shardId, validators := range validatorInfos { + for shardId, validators := range validatorInfos.GetShardValidatorsInfoMap() { for _, validator := range validators { if !vs.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { - if validator.List != string(common.EligibleList) { + if validator.GetList() != string(common.EligibleList) { continue } } else { - if validator.List != string(common.EligibleList) && !validatorInfo.WasLeavingEligibleInCurrentEpoch(validator) { + if validator.GetList() != string(common.EligibleList) && !validatorInfo.WasLeavingEligibleInCurrentEpoch(validator) { continue } } @@ -633,7 +649,7 @@ func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( } func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( - validator *state.ValidatorInfo, + validator state.ValidatorInfoHandler, signedThreshold float32, shardId uint32, epoch uint32, @@ -642,19 +658,19 @@ func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( return nil } - validatorOccurrences := core.MaxUint32(1, validator.ValidatorSuccess+validator.ValidatorFailure+validator.ValidatorIgnoredSignatures) - computedThreshold := float32(validator.ValidatorSuccess) / float32(validatorOccurrences) + validatorOccurrences := core.MaxUint32(1, validator.GetValidatorSuccess()+validator.GetValidatorFailure()+validator.GetValidatorIgnoredSignatures()) + computedThreshold := float32(validator.GetValidatorSuccess()) / float32(validatorOccurrences) if computedThreshold <= signedThreshold { increasedRatingTimes := uint32(0) if !vs.enableEpochsHandler.IsFlagEnabled(common.BelowSignedThresholdFlag) { - increasedRatingTimes = validator.ValidatorFailure + increasedRatingTimes = validator.GetValidatorFailure() } else { - increasedRatingTimes = validator.ValidatorSuccess + validator.ValidatorIgnoredSignatures + increasedRatingTimes = validator.GetValidatorSuccess() + validator.GetValidatorIgnoredSignatures() } - newTempRating := vs.rater.RevertIncreaseValidator(shardId, validator.TempRating, increasedRatingTimes) - pa, err := vs.loadPeerAccount(validator.PublicKey) + newTempRating := vs.rater.RevertIncreaseValidator(shardId, validator.GetTempRating(), increasedRatingTimes) + pa, err := vs.loadPeerAccount(validator.GetPublicKey()) if err != nil { return err } @@ -667,23 +683,23 @@ func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( } log.Debug("below signed blocks threshold", - "pk", validator.PublicKey, + "pk", validator.GetPublicKey(), "signed %", computedThreshold, - "validatorSuccess", validator.ValidatorSuccess, - "validatorFailure", validator.ValidatorFailure, - "validatorIgnored", validator.ValidatorIgnoredSignatures, + "validatorSuccess", 
validator.GetValidatorSuccess(), + "validatorFailure", validator.GetValidatorFailure(), + "validatorIgnored", validator.GetValidatorIgnoredSignatures(), "new tempRating", newTempRating, - "old tempRating", validator.TempRating, + "old tempRating", validator.GetTempRating(), ) - validator.TempRating = newTempRating + validator.SetTempRating(newTempRating) } return nil } // ResetValidatorStatisticsAtNewEpoch resets the validator info at the start of a new epoch -func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { +func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos state.ShardValidatorsInfoMapHandler) error { sw := core.NewStopWatch() sw.Start("ResetValidatorStatisticsAtNewEpoch") defer func() { @@ -691,24 +707,22 @@ func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos map[uin log.Debug("ResetValidatorStatisticsAtNewEpoch", sw.GetMeasurements()...) }() - for _, validators := range vInfos { - for _, validator := range validators { - account, err := vs.peerAdapter.LoadAccount(validator.GetPublicKey()) - if err != nil { - return err - } + for _, validator := range vInfos.GetAllValidatorsInfo() { + account, err := vs.peerAdapter.LoadAccount(validator.GetPublicKey()) + if err != nil { + return err + } - peerAccount, ok := account.(state.PeerAccountHandler) - if !ok { - return process.ErrWrongTypeAssertion - } - peerAccount.ResetAtNewEpoch() - vs.setToJailedIfNeeded(peerAccount, validator) + peerAccount, ok := account.(state.PeerAccountHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + peerAccount.ResetAtNewEpoch() + vs.setToJailedIfNeeded(peerAccount, validator) - err = vs.peerAdapter.SaveAccount(peerAccount) - if err != nil { - return err - } + err = vs.peerAdapter.SaveAccount(peerAccount) + if err != nil { + return err } } @@ -717,23 +731,23 @@ func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos map[uin func (vs *validatorStatistics) setToJailedIfNeeded( peerAccount state.PeerAccountHandler, - validator *state.ValidatorInfo, + validator state.ValidatorInfoHandler, ) { if !vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { return } - if validator.List == string(common.WaitingList) || validator.List == string(common.EligibleList) { + if validator.GetList() == string(common.WaitingList) || validator.GetList() == string(common.EligibleList) { return } - if validator.List == string(common.JailedList) && peerAccount.GetList() != string(common.JailedList) { - peerAccount.SetListAndIndex(validator.ShardId, string(common.JailedList), validator.Index) + if validator.GetList() == string(common.JailedList) && peerAccount.GetList() != string(common.JailedList) { + peerAccount.SetListAndIndex(validator.GetShardId(), string(common.JailedList), validator.GetIndex(), vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) return } if vs.isValidatorWithLowRating(peerAccount) { - peerAccount.SetListAndIndex(validator.ShardId, string(common.JailedList), validator.Index) + peerAccount.SetListAndIndex(validator.GetShardId(), string(common.JailedList), validator.GetIndex(), vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) } } @@ -874,10 +888,10 @@ func (vs *validatorStatistics) decreaseForConsensusValidators( } // RevertPeerState takes the current and previous headers and undos the peer state -// -// for all of the consensus members +// for all of the consensus members func (vs *validatorStatistics) RevertPeerState(header 
data.MetaHeaderHandler) error { - return vs.peerAdapter.RecreateTrie(header.GetValidatorStatsRootHash()) + rootHashHolder := holders.NewDefaultRootHashesHolder(header.GetValidatorStatsRootHash()) + return vs.peerAdapter.RecreateTrie(rootHashHolder) } func (vs *validatorStatistics) updateShardDataPeerState( @@ -994,7 +1008,7 @@ func (vs *validatorStatistics) savePeerAccountData( peerAccount.SetRating(startRating) peerAccount.SetTempRating(startRating) - peerAccount.SetListAndIndex(shardID, string(peerType), index) + peerAccount.SetListAndIndex(shardID, string(peerType), index, vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) return vs.peerAdapter.SaveAccount(peerAccount) } diff --git a/process/peer/process_test.go b/process/peer/process_test.go index daa885cff3a..d4c85a5601f 100644 --- a/process/peer/process_test.go +++ b/process/peer/process_test.go @@ -10,9 +10,14 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/keyValStorage" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" @@ -28,12 +33,10 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const ( @@ -99,10 +102,9 @@ func createMockArguments() peer.ArgValidatorStatisticsProcessor { MaxGasPriceSetGuardian: "100000", }, }, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -122,7 +124,7 @@ func createMockArguments() peer.ArgValidatorStatisticsProcessor { RewardsHandler: economicsData, MaxComputableRounds: 1000, MaxConsecutiveRoundsOfRatingDecrease: 2000, - NodesSetup: &mock.NodesSetupStub{}, + NodesSetup: &genesisMocks.NodesSetupStub{}, EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SwitchJailWaitingFlag, common.BelowSignedThresholdFlag), } return arguments @@ -312,7 +314,7 @@ func 
TestValidatorStatisticsProcessor_SaveInitialStateErrOnGetAccountFail(t *tes arguments := createMockArguments() arguments.PeerAdapter = peerAdapters - arguments.NodesSetup = &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + arguments.NodesSetup = &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) oneMap[0] = append(oneMap[0], mock.NewNodeInfo([]byte("aaaa"), []byte("aaaa"), 0, 50)) return oneMap, oneMap @@ -334,7 +336,7 @@ func TestValidatorStatisticsProcessor_SaveInitialStateGetAccountReturnsInvalid(t arguments := createMockArguments() arguments.PeerAdapter = peerAdapter - arguments.NodesSetup = &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + arguments.NodesSetup = &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) oneMap[0] = append(oneMap[0], mock.NewNodeInfo([]byte("aaaa"), []byte("aaaa"), 0, 50)) return oneMap, oneMap @@ -359,7 +361,7 @@ func TestValidatorStatisticsProcessor_SaveInitialStateSetAddressErrors(t *testin } arguments := createMockArguments() - arguments.NodesSetup = &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + arguments.NodesSetup = &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) oneMap[0] = append(oneMap[0], mock.NewNodeInfo([]byte("aaaa"), []byte("aaaa"), 0, 50)) return oneMap, oneMap @@ -2073,9 +2075,9 @@ func TestValidatorStatistics_Process(t *testing.T) { validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) validatorInfos, _ := validatorStatistics.GetValidatorInfoForRootHash(hash) - vi0 := validatorInfos[0][0] + vi0 := validatorInfos.GetShardValidatorsInfoMap()[0][0] newTempRating := uint32(25) - vi0.TempRating = newTempRating + vi0.SetTempRating(newTempRating) assert.NotEqual(t, newTempRating, pa0.GetRating()) @@ -2145,10 +2147,10 @@ func TestValidatorStatistics_GetValidatorInfoForRootHash(t *testing.T) { validatorInfos, err := validatorStatistics.GetValidatorInfoForRootHash(hash) assert.NotNil(t, validatorInfos) assert.Nil(t, err) - assert.Equal(t, uint32(0), validatorInfos[0][0].ShardId) - compare(t, pa0, validatorInfos[0][0]) - assert.Equal(t, core.MetachainShardId, validatorInfos[core.MetachainShardId][0].ShardId) - compare(t, paMeta, validatorInfos[core.MetachainShardId][0]) + assert.Equal(t, uint32(0), validatorInfos.GetShardValidatorsInfoMap()[0][0].GetShardId()) + compare(t, pa0, validatorInfos.GetShardValidatorsInfoMap()[0][0]) + assert.Equal(t, core.MetachainShardId, validatorInfos.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetShardId()) + compare(t, paMeta, validatorInfos.GetShardValidatorsInfoMap()[core.MetachainShardId][0]) }) } @@ -2161,7 +2163,7 @@ func 
TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNilMapShouldErr( err := validatorStatistics.ProcessRatingsEndOfEpoch(nil, 1) assert.Equal(t, process.ErrNilValidatorInfos, err) - vi := make(map[uint32][]*state.ValidatorInfo) + vi := state.NewShardValidatorsInfoMap() err = validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Equal(t, process.ErrNilValidatorInfos, err) } @@ -2179,9 +2181,8 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNoValidatorFailu tempRating1 := uint32(75) tempRating2 := uint32(80) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = &state.ValidatorInfo{ + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(&state.ValidatorInfo{ PublicKey: nil, ShardId: core.MetachainShardId, List: "", @@ -2195,12 +2196,10 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNoValidatorFailu ValidatorFailure: 0, NumSelectedInSuccessBlocks: 20, AccumulatedFees: nil, - } - - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = &state.ValidatorInfo{ + }) + _ = vi.Add(&state.ValidatorInfo{ PublicKey: nil, - ShardId: core.MetachainShardId, + ShardId: 0, List: "", Index: 0, TempRating: tempRating2, @@ -2212,12 +2211,12 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNoValidatorFailu ValidatorFailure: 0, NumSelectedInSuccessBlocks: 20, AccumulatedFees: nil, - } + }) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) - assert.Equal(t, tempRating1, vi[core.MetachainShardId][0].TempRating) - assert.Equal(t, tempRating2, vi[0][0].TempRating) + assert.Equal(t, tempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) + assert.Equal(t, tempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithSmallValidatorFailureShouldWork(t *testing.T) { @@ -2246,18 +2245,16 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithSmallValidatorFa validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1)) + _ = vi.Add(createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2)) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) expectedTempRating1 := tempRating1 - uint32(rater.MetaIncreaseValidator)*(validatorSuccess1+validatorIgnored1) - assert.Equal(t, expectedTempRating1, vi[core.MetachainShardId][0].TempRating) + assert.Equal(t, expectedTempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) expectedTempRating2 := tempRating2 - uint32(rater.IncreaseValidator)*(validatorSuccess2+validatorIgnored2) - assert.Equal(t, expectedTempRating2, vi[0][0].TempRating) + assert.Equal(t, expectedTempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochComputesJustEligible(t *testing.T) { 
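Across the test hunks above and below, the same mechanical migration is applied: the hand-built map[uint32][]*state.ValidatorInfo is replaced by the new state.ShardValidatorsInfoMapHandler, populated through Add and read back through GetShardValidatorsInfoMap and the per-field getters. A minimal before/after sketch of that pattern, using only the accessors visible in these hunks (the package clause, helper name and literal values are illustrative and not part of the change):

package peer_test // illustrative placement only

import (
	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-go/state"
)

// migrationSketch shows the before/after shape of the validator-info refactor.
func migrationSketch() uint32 {
	// Before: tests built and indexed the nested shard map by hand.
	oldMap := make(map[uint32][]*state.ValidatorInfo)
	oldMap[core.MetachainShardId] = []*state.ValidatorInfo{
		{PublicKey: []byte("pk"), ShardId: core.MetachainShardId, TempRating: 50},
	}
	_ = oldMap[core.MetachainShardId][0].TempRating

	// After: entries go in through Add and come back out through
	// GetShardValidatorsInfoMap plus the GetTempRating/GetList getters.
	vi := state.NewShardValidatorsInfoMap()
	_ = vi.Add(&state.ValidatorInfo{
		PublicKey:  []byte("pk"),
		ShardId:    core.MetachainShardId,
		TempRating: 50,
	})
	return vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()
}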
@@ -2287,20 +2284,19 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochComputesJustEligible validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1)) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) - vi[0][0].List = string(common.WaitingList) + validatorWaiting := createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + validatorWaiting.SetList(string(common.WaitingList)) + _ = vi.Add(validatorWaiting) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) expectedTempRating1 := tempRating1 - uint32(rater.MetaIncreaseValidator)*(validatorSuccess1+validatorIgnored1) - assert.Equal(t, expectedTempRating1, vi[core.MetachainShardId][0].TempRating) + assert.Equal(t, expectedTempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) - assert.Equal(t, tempRating2, vi[0][0].TempRating) + assert.Equal(t, tempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochV2ComputesEligibleLeaving(t *testing.T) { @@ -2332,21 +2328,21 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochV2ComputesEligibleLe validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) - vi[core.MetachainShardId][0].List = string(common.LeavingList) + vi := state.NewShardValidatorsInfoMap() + validatorLeaving := createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) + validatorLeaving.SetList(string(common.LeavingList)) + _ = vi.Add(validatorLeaving) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) - vi[0][0].List = string(common.WaitingList) + validatorWaiting := createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + validatorWaiting.SetList(string(common.WaitingList)) + _ = vi.Add(validatorWaiting) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) expectedTempRating1 := tempRating1 - uint32(rater.MetaIncreaseValidator)*(validatorSuccess1+validatorIgnored1) - assert.Equal(t, expectedTempRating1, vi[core.MetachainShardId][0].TempRating) + assert.Equal(t, expectedTempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) - assert.Equal(t, tempRating2, vi[0][0].TempRating) + assert.Equal(t, tempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithLargeValidatorFailureBelowMinRatingShouldWork(t *testing.T) { @@ -2374,18 +2370,16 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithLargeValidatorFa 
validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1)) + _ = vi.Add(createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2)) validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) - assert.Equal(t, rater.MinRating, vi[core.MetachainShardId][0].TempRating) - assert.Equal(t, rater.MinRating, vi[0][0].TempRating) + assert.Equal(t, rater.MinRating, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) + assert.Equal(t, rater.MinRating, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorsProvider_PeerAccoutToValidatorInfo(t *testing.T) { @@ -2485,26 +2479,26 @@ func createMockValidatorInfo(shardId uint32, tempRating uint32, validatorSuccess } } -func compare(t *testing.T, peerAccount state.PeerAccountHandler, validatorInfo *state.ValidatorInfo) { - assert.Equal(t, peerAccount.GetShardId(), validatorInfo.ShardId) - assert.Equal(t, peerAccount.GetRating(), validatorInfo.Rating) - assert.Equal(t, peerAccount.GetTempRating(), validatorInfo.TempRating) - assert.Equal(t, peerAccount.AddressBytes(), validatorInfo.PublicKey) - assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumFailure(), validatorInfo.ValidatorFailure) - assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumSuccess(), validatorInfo.ValidatorSuccess) - assert.Equal(t, peerAccount.GetValidatorIgnoredSignaturesRate(), validatorInfo.ValidatorIgnoredSignatures) - assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumFailure(), validatorInfo.LeaderFailure) - assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumSuccess(), validatorInfo.LeaderSuccess) - assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumFailure(), validatorInfo.TotalValidatorFailure) - assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumSuccess(), validatorInfo.TotalValidatorSuccess) - assert.Equal(t, peerAccount.GetTotalValidatorIgnoredSignaturesRate(), validatorInfo.TotalValidatorIgnoredSignatures) - assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumFailure(), validatorInfo.TotalLeaderFailure) - assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumSuccess(), validatorInfo.TotalLeaderSuccess) - assert.Equal(t, peerAccount.GetList(), validatorInfo.List) - assert.Equal(t, peerAccount.GetIndexInList(), validatorInfo.Index) - assert.Equal(t, peerAccount.GetRewardAddress(), validatorInfo.RewardAddress) - assert.Equal(t, peerAccount.GetAccumulatedFees(), validatorInfo.AccumulatedFees) - assert.Equal(t, peerAccount.GetNumSelectedInSuccessBlocks(), validatorInfo.NumSelectedInSuccessBlocks) +func compare(t *testing.T, peerAccount state.PeerAccountHandler, validatorInfo state.ValidatorInfoHandler) { + assert.Equal(t, peerAccount.GetShardId(), validatorInfo.GetShardId()) + assert.Equal(t, peerAccount.GetRating(), validatorInfo.GetRating()) + assert.Equal(t, 
peerAccount.GetTempRating(), validatorInfo.GetTempRating()) + assert.Equal(t, peerAccount.AddressBytes(), validatorInfo.GetPublicKey()) + assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumFailure(), validatorInfo.GetValidatorFailure()) + assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumSuccess(), validatorInfo.GetValidatorSuccess()) + assert.Equal(t, peerAccount.GetValidatorIgnoredSignaturesRate(), validatorInfo.GetValidatorIgnoredSignatures()) + assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumFailure(), validatorInfo.GetLeaderFailure()) + assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumSuccess(), validatorInfo.GetLeaderSuccess()) + assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumFailure(), validatorInfo.GetTotalValidatorFailure()) + assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumSuccess(), validatorInfo.GetTotalValidatorSuccess()) + assert.Equal(t, peerAccount.GetTotalValidatorIgnoredSignaturesRate(), validatorInfo.GetTotalValidatorIgnoredSignatures()) + assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumFailure(), validatorInfo.GetTotalLeaderFailure()) + assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumSuccess(), validatorInfo.GetTotalLeaderSuccess()) + assert.Equal(t, peerAccount.GetList(), validatorInfo.GetList()) + assert.Equal(t, peerAccount.GetIndexInList(), validatorInfo.GetIndex()) + assert.Equal(t, peerAccount.GetRewardAddress(), validatorInfo.GetRewardAddress()) + assert.Equal(t, peerAccount.GetAccumulatedFees(), validatorInfo.GetAccumulatedFees()) + assert.Equal(t, peerAccount.GetNumSelectedInSuccessBlocks(), validatorInfo.GetNumSelectedInSuccessBlocks()) } func createPeerAccounts(addrBytes0 []byte, addrBytesMeta []byte) (state.PeerAccountHandler, state.PeerAccountHandler) { @@ -2655,6 +2649,121 @@ func TestValidatorStatisticsProcessor_SaveNodesCoordinatorUpdates(t *testing.T) assert.False(t, nodeForcedToRemain) } +func TestValidatorStatisticsProcessor_SaveNodesCoordinatorUpdatesWithStakingV4(t *testing.T) { + t.Parallel() + + peerAdapter := getAccountsMock() + arguments := createMockArguments() + arguments.PeerAdapter = peerAdapter + + pk0 := []byte("pk0") + pk1 := []byte("pk1") + pk2 := []byte("pk2") + + account0, _ := accounts.NewPeerAccount(pk0) + account1, _ := accounts.NewPeerAccount(pk1) + account2, _ := accounts.NewPeerAccount(pk2) + + ctLoadAccount := &atomic.Counter{} + ctSaveAccount := &atomic.Counter{} + + peerAdapter.LoadAccountCalled = func(address []byte) (vmcommon.AccountHandler, error) { + ctLoadAccount.Increment() + + switch string(address) { + case string(pk0): + return account0, nil + case string(pk1): + return account1, nil + case string(pk2): + return account2, nil + default: + require.Fail(t, "should not have called this for other address") + return nil, nil + } + } + peerAdapter.SaveAccountCalled = func(account vmcommon.AccountHandler) error { + ctSaveAccount.Increment() + peerAccount := account.(state.PeerAccountHandler) + require.Equal(t, uint32(0), peerAccount.GetIndexInList()) + + switch string(account.AddressBytes()) { + case string(pk0): + require.Equal(t, string(common.EligibleList), peerAccount.GetList()) + require.Equal(t, uint32(0), peerAccount.GetShardId()) + return nil + case string(pk1): + require.Equal(t, string(common.AuctionList), peerAccount.GetList()) + require.Equal(t, uint32(0), peerAccount.GetShardId()) + return nil + case string(pk2): + require.Equal(t, string(common.AuctionList), peerAccount.GetList()) + require.Equal(t, 
core.MetachainShardId, peerAccount.GetShardId()) + return nil + default: + require.Fail(t, "should not have called this for other account") + return nil + } + } + + arguments.NodesCoordinator = &shardingMocks.NodesCoordinatorMock{ + GetAllEligibleValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + mapNodes := map[uint32][][]byte{ + 0: {pk0}, + } + return mapNodes, nil + }, + GetAllShuffledOutValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + mapNodes := map[uint32][][]byte{ + 0: {pk1}, + core.MetachainShardId: {pk2}, + } + return mapNodes, nil + }, + GetShuffledOutToAuctionValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + mapNodes := map[uint32][][]byte{ + 0: {pk1}, + core.MetachainShardId: {pk2}, + } + return mapNodes, nil + }, + } + stakingV4Step2EnableEpochCalledCt := 0 + arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.StakingV4Step2Flag { + stakingV4Step2EnableEpochCalledCt++ + switch stakingV4Step2EnableEpochCalledCt { + case 1: + return false + case 2: + return true + default: + require.Fail(t, "should only call this twice") + } + } + + return false + }, + } + + validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) + nodeForcedToRemain, err := validatorStatistics.SaveNodesCoordinatorUpdates(0) + require.Nil(t, err) + require.False(t, nodeForcedToRemain) + require.Equal(t, int64(1), ctSaveAccount.Get()) + require.Equal(t, int64(1), ctLoadAccount.Get()) + + ctSaveAccount.Reset() + ctLoadAccount.Reset() + + nodeForcedToRemain, err = validatorStatistics.SaveNodesCoordinatorUpdates(0) + require.Nil(t, err) + require.False(t, nodeForcedToRemain) + require.Equal(t, int64(3), ctSaveAccount.Get()) + require.Equal(t, int64(3), ctLoadAccount.Get()) +} + func TestValidatorStatisticsProcessor_getActualList(t *testing.T) { t.Parallel() diff --git a/process/peer/ratingReader.go b/process/peer/ratingReader.go index 4a8c8f1c5be..83f236b3869 100644 --- a/process/peer/ratingReader.go +++ b/process/peer/ratingReader.go @@ -5,13 +5,13 @@ type RatingReader struct { getRating func(string) uint32 } -//GetRating returns the Rating for the specified public key +// GetRating returns the Rating for the specified public key func (bsr *RatingReader) GetRating(pk string) uint32 { rating := bsr.getRating(pk) return rating } -//IsInterfaceNil checks if the underlying object is nil +// IsInterfaceNil checks if the underlying object is nil func (bsr *RatingReader) IsInterfaceNil() bool { return bsr == nil } diff --git a/process/peer/validatorsProvider.go b/process/peer/validatorsProvider.go index 6ab8d0ac49b..a7aa60ea7f5 100644 --- a/process/peer/validatorsProvider.go +++ b/process/peer/validatorsProvider.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" @@ -24,14 +25,22 @@ type validatorsProvider struct { nodesCoordinator process.NodesCoordinator validatorStatistics process.ValidatorStatisticsProcessor cache map[string]*validator.ValidatorStatistics + 
cachedAuctionValidators []*common.AuctionListValidatorAPIResponse + cachedRandomness []byte cacheRefreshIntervalDuration time.Duration refreshCache chan uint32 lastCacheUpdate time.Time + lastAuctionCacheUpdate time.Time lock sync.RWMutex + auctionMutex sync.RWMutex cancelFunc func() - pubkeyConverter core.PubkeyConverter - maxRating uint32 - currentEpoch uint32 + validatorPubKeyConverter core.PubkeyConverter + addressPubKeyConverter core.PubkeyConverter + stakingDataProvider StakingDataProviderAPI + auctionListSelector epochStart.AuctionListSelector + + maxRating uint32 + currentEpoch uint32 } // ArgValidatorsProvider contains all parameters needed for creating a validatorsProvider @@ -40,7 +49,10 @@ type ArgValidatorsProvider struct { EpochStartEventNotifier process.EpochStartEventNotifier CacheRefreshIntervalDurationInSec time.Duration ValidatorStatistics process.ValidatorStatisticsProcessor - PubKeyConverter core.PubkeyConverter + ValidatorPubKeyConverter core.PubkeyConverter + AddressPubKeyConverter core.PubkeyConverter + StakingDataProvider StakingDataProviderAPI + AuctionListSelector epochStart.AuctionListSelector StartEpoch uint32 MaxRating uint32 } @@ -53,8 +65,11 @@ func NewValidatorsProvider( if check.IfNil(args.ValidatorStatistics) { return nil, process.ErrNilValidatorStatistics } - if check.IfNil(args.PubKeyConverter) { - return nil, process.ErrNilPubkeyConverter + if check.IfNil(args.ValidatorPubKeyConverter) { + return nil, fmt.Errorf("%w for validators", process.ErrNilPubkeyConverter) + } + if check.IfNil(args.AddressPubKeyConverter) { + return nil, fmt.Errorf("%w for addresses", process.ErrNilPubkeyConverter) } if check.IfNil(args.NodesCoordinator) { return nil, process.ErrNilNodesCoordinator @@ -62,6 +77,12 @@ func NewValidatorsProvider( if check.IfNil(args.EpochStartEventNotifier) { return nil, process.ErrNilEpochStartNotifier } + if check.IfNil(args.StakingDataProvider) { + return nil, process.ErrNilStakingDataProvider + } + if check.IfNil(args.AuctionListSelector) { + return nil, epochStart.ErrNilAuctionListSelector + } if args.MaxRating == 0 { return nil, process.ErrMaxRatingZero } @@ -74,14 +95,20 @@ func NewValidatorsProvider( valProvider := &validatorsProvider{ nodesCoordinator: args.NodesCoordinator, validatorStatistics: args.ValidatorStatistics, + stakingDataProvider: args.StakingDataProvider, cache: make(map[string]*validator.ValidatorStatistics), + cachedAuctionValidators: make([]*common.AuctionListValidatorAPIResponse, 0), + cachedRandomness: make([]byte, 0), cacheRefreshIntervalDuration: args.CacheRefreshIntervalDurationInSec, refreshCache: make(chan uint32), lock: sync.RWMutex{}, + auctionMutex: sync.RWMutex{}, cancelFunc: cancelfunc, maxRating: args.MaxRating, - pubkeyConverter: args.PubKeyConverter, + validatorPubKeyConverter: args.ValidatorPubKeyConverter, + addressPubKeyConverter: args.AddressPubKeyConverter, currentEpoch: args.StartEpoch, + auctionListSelector: args.AuctionListSelector, } go valProvider.startRefreshProcess(currentContext) @@ -92,13 +119,7 @@ func NewValidatorsProvider( // GetLatestValidators gets the latest configuration of validators from the peerAccountsTrie func (vp *validatorsProvider) GetLatestValidators() map[string]*validator.ValidatorStatistics { - vp.lock.RLock() - shouldUpdate := time.Since(vp.lastCacheUpdate) > vp.cacheRefreshIntervalDuration - vp.lock.RUnlock() - - if shouldUpdate { - vp.updateCache() - } + vp.updateCacheIfNeeded() vp.lock.RLock() clonedMap := cloneMap(vp.cache) @@ -107,6 +128,17 @@ func (vp 
*validatorsProvider) GetLatestValidators() map[string]*validator.Valida return clonedMap } +func (vp *validatorsProvider) updateCacheIfNeeded() { + vp.lock.Lock() + defer vp.lock.Unlock() + + shouldUpdate := time.Since(vp.lastCacheUpdate) > vp.cacheRefreshIntervalDuration + + if shouldUpdate { + vp.updateCache() + } +} + func cloneMap(cache map[string]*validator.ValidatorStatistics) map[string]*validator.ValidatorStatistics { newMap := make(map[string]*validator.ValidatorStatistics) @@ -161,7 +193,10 @@ func (vp *validatorsProvider) epochStartEventHandler() nodesCoordinator.EpochSta func (vp *validatorsProvider) startRefreshProcess(ctx context.Context) { for { + vp.lock.Lock() vp.updateCache() + vp.lock.Unlock() + select { case epoch := <-vp.refreshCache: vp.lock.Lock() @@ -175,6 +210,7 @@ func (vp *validatorsProvider) startRefreshProcess(ctx context.Context) { } } +// this func should be called under mutex protection func (vp *validatorsProvider) updateCache() { lastFinalizedRootHash := vp.validatorStatistics.LastFinalizedRootHash() if len(lastFinalizedRootHash) == 0 { @@ -182,65 +218,60 @@ func (vp *validatorsProvider) updateCache() { } allNodes, err := vp.validatorStatistics.GetValidatorInfoForRootHash(lastFinalizedRootHash) if err != nil { + allNodes = state.NewShardValidatorsInfoMap() log.Trace("validatorsProvider - GetLatestValidatorInfos failed", "error", err) } - vp.lock.RLock() epoch := vp.currentEpoch - vp.lock.RUnlock() newCache := vp.createNewCache(epoch, allNodes) - vp.lock.Lock() vp.lastCacheUpdate = time.Now() vp.cache = newCache - vp.lock.Unlock() } func (vp *validatorsProvider) createNewCache( epoch uint32, - allNodes map[uint32][]*state.ValidatorInfo, + allNodes state.ShardValidatorsInfoMapHandler, ) map[string]*validator.ValidatorStatistics { newCache := vp.createValidatorApiResponseMapFromValidatorInfoMap(allNodes) nodesMapEligible, err := vp.nodesCoordinator.GetAllEligibleValidatorsPublicKeys(epoch) if err != nil { - log.Debug("validatorsProvider - GetAllEligibleValidatorsPublicKeys failed", "epoch", epoch) + log.Debug("validatorsProvider - GetAllEligibleValidatorsPublicKeys failed", "epoch", epoch, "error", err) } vp.aggregateLists(newCache, nodesMapEligible, common.EligibleList) nodesMapWaiting, err := vp.nodesCoordinator.GetAllWaitingValidatorsPublicKeys(epoch) if err != nil { - log.Debug("validatorsProvider - GetAllWaitingValidatorsPublicKeys failed", "epoch", epoch) + log.Debug("validatorsProvider - GetAllWaitingValidatorsPublicKeys failed", "epoch", epoch, "error", err) } vp.aggregateLists(newCache, nodesMapWaiting, common.WaitingList) return newCache } -func (vp *validatorsProvider) createValidatorApiResponseMapFromValidatorInfoMap(allNodes map[uint32][]*state.ValidatorInfo) map[string]*validator.ValidatorStatistics { +func (vp *validatorsProvider) createValidatorApiResponseMapFromValidatorInfoMap(allNodes state.ShardValidatorsInfoMapHandler) map[string]*validator.ValidatorStatistics { newCache := make(map[string]*validator.ValidatorStatistics) - for _, validatorInfosInShard := range allNodes { - for _, validatorInfo := range validatorInfosInShard { - strKey := vp.pubkeyConverter.SilentEncode(validatorInfo.PublicKey, log) - - newCache[strKey] = &validator.ValidatorStatistics{ - NumLeaderSuccess: validatorInfo.LeaderSuccess, - NumLeaderFailure: validatorInfo.LeaderFailure, - NumValidatorSuccess: validatorInfo.ValidatorSuccess, - NumValidatorFailure: validatorInfo.ValidatorFailure, - NumValidatorIgnoredSignatures: validatorInfo.ValidatorIgnoredSignatures, - 
TotalNumLeaderSuccess: validatorInfo.TotalLeaderSuccess, - TotalNumLeaderFailure: validatorInfo.TotalLeaderFailure, - TotalNumValidatorSuccess: validatorInfo.TotalValidatorSuccess, - TotalNumValidatorFailure: validatorInfo.TotalValidatorFailure, - TotalNumValidatorIgnoredSignatures: validatorInfo.TotalValidatorIgnoredSignatures, - RatingModifier: validatorInfo.RatingModifier, - Rating: float32(validatorInfo.Rating) * 100 / float32(vp.maxRating), - TempRating: float32(validatorInfo.TempRating) * 100 / float32(vp.maxRating), - ShardId: validatorInfo.ShardId, - ValidatorStatus: validatorInfo.List, - } + + for _, validatorInfo := range allNodes.GetAllValidatorsInfo() { + strKey := vp.validatorPubKeyConverter.SilentEncode(validatorInfo.GetPublicKey(), log) + newCache[strKey] = &validator.ValidatorStatistics{ + NumLeaderSuccess: validatorInfo.GetLeaderSuccess(), + NumLeaderFailure: validatorInfo.GetLeaderFailure(), + NumValidatorSuccess: validatorInfo.GetValidatorSuccess(), + NumValidatorFailure: validatorInfo.GetValidatorFailure(), + NumValidatorIgnoredSignatures: validatorInfo.GetValidatorIgnoredSignatures(), + TotalNumLeaderSuccess: validatorInfo.GetTotalLeaderSuccess(), + TotalNumLeaderFailure: validatorInfo.GetTotalLeaderFailure(), + TotalNumValidatorSuccess: validatorInfo.GetTotalValidatorSuccess(), + TotalNumValidatorFailure: validatorInfo.GetTotalValidatorFailure(), + TotalNumValidatorIgnoredSignatures: validatorInfo.GetTotalValidatorIgnoredSignatures(), + RatingModifier: validatorInfo.GetRatingModifier(), + Rating: float32(validatorInfo.GetRating()) * 100 / float32(vp.maxRating), + TempRating: float32(validatorInfo.GetTempRating()) * 100 / float32(vp.maxRating), + ShardId: validatorInfo.GetShardId(), + ValidatorStatus: validatorInfo.GetList(), } } @@ -254,8 +285,7 @@ func (vp *validatorsProvider) aggregateLists( ) { for shardID, shardValidators := range validatorsMap { for _, val := range shardValidators { - encodedKey := vp.pubkeyConverter.SilentEncode(val, log) - + encodedKey := vp.validatorPubKeyConverter.SilentEncode(val, log) foundInTrieValidator, ok := newCache[encodedKey] peerType := string(currentList) @@ -288,6 +318,18 @@ func shouldCombine(triePeerType common.PeerType, currentPeerType common.PeerType return isLeaving && isEligibleOrWaiting } +// ForceUpdate will trigger the update process of all caches +func (vp *validatorsProvider) ForceUpdate() error { + vp.lock.Lock() + vp.updateCache() + vp.lock.Unlock() + + vp.auctionMutex.Lock() + defer vp.auctionMutex.Unlock() + + return vp.updateAuctionListCache() +} + // IsInterfaceNil returns true if there is no value under the interface func (vp *validatorsProvider) IsInterfaceNil() bool { return vp == nil diff --git a/process/peer/validatorsProviderAuction.go b/process/peer/validatorsProviderAuction.go new file mode 100644 index 00000000000..a31a89f97e8 --- /dev/null +++ b/process/peer/validatorsProviderAuction.go @@ -0,0 +1,217 @@ +package peer + +import ( + "bytes" + "math/big" + "sort" + "time" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" +) + +// GetAuctionList returns an array containing the validators that are currently in the auction list +func (vp *validatorsProvider) GetAuctionList() ([]*common.AuctionListValidatorAPIResponse, error) { + err := vp.updateAuctionListCacheIfNeeded() + if err != nil { + return nil, err + } + + vp.auctionMutex.RLock() + ret := 
make([]*common.AuctionListValidatorAPIResponse, len(vp.cachedAuctionValidators)) + copy(ret, vp.cachedAuctionValidators) + vp.auctionMutex.RUnlock() + + return ret, nil +} + +func (vp *validatorsProvider) updateAuctionListCacheIfNeeded() error { + vp.auctionMutex.Lock() + defer vp.auctionMutex.Unlock() + + shouldUpdate := time.Since(vp.lastAuctionCacheUpdate) > vp.cacheRefreshIntervalDuration + + if shouldUpdate { + return vp.updateAuctionListCache() + } + + return nil +} + +// this func should be called under mutex protection +func (vp *validatorsProvider) updateAuctionListCache() error { + rootHash := vp.validatorStatistics.LastFinalizedRootHash() + if len(rootHash) == 0 { + return state.ErrNilRootHash + } + + validatorsMap, err := vp.validatorStatistics.GetValidatorInfoForRootHash(rootHash) + if err != nil { + return err + } + + vp.cachedRandomness = rootHash + + newCache, err := vp.createValidatorsAuctionCache(validatorsMap) + if err != nil { + return err + } + + vp.lastAuctionCacheUpdate = time.Now() + vp.cachedAuctionValidators = newCache + + return nil +} + +func (vp *validatorsProvider) createValidatorsAuctionCache(validatorsMap state.ShardValidatorsInfoMapHandler) ([]*common.AuctionListValidatorAPIResponse, error) { + defer vp.stakingDataProvider.Clean() + + err := vp.fillAllValidatorsInfo(validatorsMap) + if err != nil { + return nil, err + } + + selectedNodes, err := vp.getSelectedNodesFromAuction(validatorsMap) + if err != nil { + return nil, err + } + + auctionListValidators, qualifiedOwners := vp.getAuctionListValidatorsAPIResponse(selectedNodes) + sortList(auctionListValidators, qualifiedOwners) + return auctionListValidators, nil +} + +func (vp *validatorsProvider) fillAllValidatorsInfo(validatorsMap state.ShardValidatorsInfoMapHandler) error { + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + err := vp.stakingDataProvider.FillValidatorInfo(validator) + if err != nil { + return err + } + } + + _, _, err := vp.stakingDataProvider.ComputeUnQualifiedNodes(validatorsMap) + return err +} + +// this func should be called under mutex protection +func (vp *validatorsProvider) getSelectedNodesFromAuction(validatorsMap state.ShardValidatorsInfoMapHandler) ([]state.ValidatorInfoHandler, error) { + randomness := vp.cachedRandomness + + err := vp.auctionListSelector.SelectNodesFromAuctionList(validatorsMap, randomness) + if err != nil { + return nil, err + } + + selectedNodes := make([]state.ValidatorInfoHandler, 0) + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + if validator.GetList() == string(common.SelectedFromAuctionList) { + selectedNodes = append(selectedNodes, validator.ShallowClone()) + } + } + + return selectedNodes, nil +} + +func sortList(list []*common.AuctionListValidatorAPIResponse, qualifiedOwners map[string]bool) { + sort.SliceStable(list, func(i, j int) bool { + qualifiedTopUpValidator1, _ := big.NewInt(0).SetString(list[i].QualifiedTopUp, 10) + qualifiedTopUpValidator2, _ := big.NewInt(0).SetString(list[j].QualifiedTopUp, 10) + if qualifiedTopUpValidator1.Cmp(qualifiedTopUpValidator2) == 0 { + return compareByNumQualified(list[i], list[j], qualifiedOwners) + } + + return qualifiedTopUpValidator1.Cmp(qualifiedTopUpValidator2) > 0 + }) +} + +func compareByNumQualified(owner1Nodes, owner2Nodes *common.AuctionListValidatorAPIResponse, qualifiedOwners map[string]bool) bool { + owner1Qualified := qualifiedOwners[owner1Nodes.Owner] + owner2Qualified := qualifiedOwners[owner2Nodes.Owner] + + bothQualified := owner1Qualified && 
owner2Qualified + if !bothQualified { + return owner1Qualified + } + + owner1NumQualified := getNumQualified(owner1Nodes.Nodes) + owner2NumQualified := getNumQualified(owner2Nodes.Nodes) + + return owner1NumQualified > owner2NumQualified +} + +func getNumQualified(nodes []*common.AuctionNode) uint32 { + numQualified := uint32(0) + for _, node := range nodes { + if node.Qualified { + numQualified++ + } + } + + return numQualified +} + +func (vp *validatorsProvider) getAuctionListValidatorsAPIResponse( + selectedNodes []state.ValidatorInfoHandler, +) ([]*common.AuctionListValidatorAPIResponse, map[string]bool) { + auctionListValidators := make([]*common.AuctionListValidatorAPIResponse, 0) + qualifiedOwners := make(map[string]bool) + + for ownerPubKey, ownerData := range vp.stakingDataProvider.GetOwnersData() { + numAuctionNodes := len(ownerData.AuctionList) + if numAuctionNodes > 0 { + ownerEncodedPubKey := vp.addressPubKeyConverter.SilentEncode([]byte(ownerPubKey), log) + auctionValidator := &common.AuctionListValidatorAPIResponse{ + Owner: ownerEncodedPubKey, + NumStakedNodes: ownerData.NumStakedNodes, + TotalTopUp: ownerData.TotalTopUp.String(), + TopUpPerNode: ownerData.TopUpPerNode.String(), + QualifiedTopUp: ownerData.TopUpPerNode.String(), + Nodes: make([]*common.AuctionNode, 0, numAuctionNodes), + } + vp.fillAuctionQualifiedValidatorAPIData(selectedNodes, ownerData, auctionValidator) + auctionListValidators = append(auctionListValidators, auctionValidator) + + qualifiedOwners[ownerEncodedPubKey] = ownerData.Qualified + } + } + + return auctionListValidators, qualifiedOwners +} + +func (vp *validatorsProvider) fillAuctionQualifiedValidatorAPIData( + selectedNodes []state.ValidatorInfoHandler, + ownerData *epochStart.OwnerData, + auctionValidatorAPI *common.AuctionListValidatorAPIResponse, +) { + auctionValidatorAPI.Nodes = make([]*common.AuctionNode, 0, len(ownerData.AuctionList)) + numOwnerQualifiedNodes := int64(0) + for _, nodeInAuction := range ownerData.AuctionList { + auctionNode := &common.AuctionNode{ + BlsKey: vp.validatorPubKeyConverter.SilentEncode(nodeInAuction.GetPublicKey(), log), + Qualified: false, + } + if ownerData.Qualified && contains(selectedNodes, nodeInAuction) { + auctionNode.Qualified = true + numOwnerQualifiedNodes++ + } + + auctionValidatorAPI.Nodes = append(auctionValidatorAPI.Nodes, auctionNode) + } + + if numOwnerQualifiedNodes > 0 { + activeNodes := big.NewInt(ownerData.NumActiveNodes) + qualifiedNodes := big.NewInt(numOwnerQualifiedNodes) + ownerRemainingNodes := big.NewInt(0).Add(activeNodes, qualifiedNodes) + auctionValidatorAPI.QualifiedTopUp = big.NewInt(0).Div(ownerData.TotalTopUp, ownerRemainingNodes).String() + } +} + +func contains(list []state.ValidatorInfoHandler, validator state.ValidatorInfoHandler) bool { + for _, val := range list { + if bytes.Equal(val.GetPublicKey(), validator.GetPublicKey()) { + return true + } + } + return false +} diff --git a/process/peer/validatorsProvider_test.go b/process/peer/validatorsProvider_test.go index cd718e0c78b..71da53f08e7 100644 --- a/process/peer/validatorsProvider_test.go +++ b/process/peer/validatorsProvider_test.go @@ -6,12 +6,14 @@ import ( "encoding/hex" "fmt" "math/big" + "strings" "sync" "sync/atomic" "testing" "time" "github.com/multiversx/mx-chain-core-go/core" + coreAtomic "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" 
"github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/validator" @@ -22,8 +24,10 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewValidatorsProvider_WithNilValidatorStatisticsShouldErr(t *testing.T) { @@ -42,16 +46,36 @@ func TestNewValidatorsProvider_WithMaxRatingZeroShouldErr(t *testing.T) { assert.Nil(t, vp) } -func TestNewValidatorsProvider_WithNilValidatorPubkeyConverterShouldErr(t *testing.T) { +func TestNewValidatorsProvider_WithNilValidatorPubKeyConverterShouldErr(t *testing.T) { arg := createDefaultValidatorsProviderArg() - arg.PubKeyConverter = nil + arg.ValidatorPubKeyConverter = nil vp, err := NewValidatorsProvider(arg) - assert.Equal(t, process.ErrNilPubkeyConverter, err) + assert.True(t, errors.Is(err, process.ErrNilPubkeyConverter)) + assert.True(t, strings.Contains(err.Error(), "validator")) assert.True(t, check.IfNil(vp)) } -func TestNewValidatorsProvider_WithNilNodesCoordinatorrShouldErr(t *testing.T) { +func TestNewValidatorsProvider_WithNilAddressPubkeyConverterShouldErr(t *testing.T) { + arg := createDefaultValidatorsProviderArg() + arg.AddressPubKeyConverter = nil + vp, err := NewValidatorsProvider(arg) + + assert.True(t, errors.Is(err, process.ErrNilPubkeyConverter)) + assert.True(t, strings.Contains(err.Error(), "address")) + assert.True(t, check.IfNil(vp)) +} + +func TestNewValidatorsProvider_WithNilStakingDataProviderShouldErr(t *testing.T) { + arg := createDefaultValidatorsProviderArg() + arg.StakingDataProvider = nil + vp, err := NewValidatorsProvider(arg) + + assert.Equal(t, process.ErrNilStakingDataProvider, err) + assert.True(t, check.IfNil(vp)) +} + +func TestNewValidatorsProvider_WithNilNodesCoordinatorShouldErr(t *testing.T) { arg := createDefaultValidatorsProviderArg() arg.NodesCoordinator = nil vp, err := NewValidatorsProvider(arg) @@ -69,7 +93,7 @@ func TestNewValidatorsProvider_WithNilStartOfEpochTriggerShouldErr(t *testing.T) assert.True(t, check.IfNil(vp)) } -func TestNewValidatorsProvider_WithNilRefresCacheIntervalInSecShouldErr(t *testing.T) { +func TestNewValidatorsProvider_WithZeroRefreshCacheIntervalInSecShouldErr(t *testing.T) { arg := createDefaultValidatorsProviderArg() arg.CacheRefreshIntervalDurationInSec = 0 vp, err := NewValidatorsProvider(arg) @@ -78,25 +102,33 @@ func TestNewValidatorsProvider_WithNilRefresCacheIntervalInSecShouldErr(t *testi assert.True(t, check.IfNil(vp)) } +func TestNewValidatorsProvider_WithNilAuctionListSelectorShouldErr(t *testing.T) { + arg := createDefaultValidatorsProviderArg() + arg.AuctionListSelector = nil + vp, err := NewValidatorsProvider(arg) + + require.Nil(t, vp) + require.Equal(t, epochStart.ErrNilAuctionListSelector, err) +} + func TestValidatorsProvider_GetLatestValidatorsSecondHashDoesNotExist(t *testing.T) { mut := sync.Mutex{} root := []byte("rootHash") e := errors.Errorf("not ok") initialInfo := createMockValidatorInfo() - validatorInfos := map[uint32][]*state.ValidatorInfo{ - 0: {initialInfo}, - } + validatorInfos := state.NewShardValidatorsInfoMap() + _ = validatorInfos.Add(initialInfo) gotOk := false gotNil := 
false - vs := &mock.ValidatorStatisticsProcessorStub{ + vs := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() (bytes []byte) { mut.Lock() defer mut.Unlock() return root }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m map[uint32][]*state.ValidatorInfo, err error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m state.ShardValidatorsInfoMapHandler, err error) { mut.Lock() defer mut.Unlock() if bytes.Equal([]byte("rootHash"), rootHash) { @@ -167,10 +199,10 @@ func TestValidatorsProvider_CallsPopulateAndRegister(t *testing.T) { }, } - arg.ValidatorStatistics = &mock.ValidatorStatisticsProcessorStub{ - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + arg.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { atomic.AddInt32(&numPopulateCacheCalled, 1) - return nil, nil + return state.NewShardValidatorsInfoMap(), nil }, LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") @@ -189,12 +221,12 @@ func TestValidatorsProvider_UpdateCache_WithError(t *testing.T) { expectedErr := errors.New("expectedError") arg := createDefaultValidatorsProviderArg() - validatorProc := &mock.ValidatorStatisticsProcessorStub{ + validatorProc := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { return nil, expectedErr } @@ -213,7 +245,7 @@ func TestValidatorsProvider_UpdateCache_WithError(t *testing.T) { cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: nil, lock: sync.RWMutex{}, - pubkeyConverter: testscommon.NewPubkeyConverterMock(32), + validatorPubKeyConverter: testscommon.NewPubkeyConverterMock(32), } vsp.updateCache() @@ -233,6 +265,8 @@ func TestValidatorsProvider_Cancel_startRefreshProcess(t *testing.T) { cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: make(chan uint32), lock: sync.RWMutex{}, + stakingDataProvider: &stakingcommon.StakingDataProviderStub{}, + auctionListSelector: &stakingcommon.AuctionListSelectorStub{}, } ctx, cancelFunc := context.WithCancel(context.Background()) @@ -264,21 +298,20 @@ func TestValidatorsProvider_UpdateCache(t *testing.T) { pk := []byte("pk1") initialShardId := uint32(1) initialList := string(common.EligibleList) - validatorsMap := make(map[uint32][]*state.ValidatorInfo) - validatorsMap[initialShardId] = []*state.ValidatorInfo{ - { - PublicKey: pk, - List: initialList, - ShardId: initialShardId, - }, - } + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pk, + List: initialList, + ShardId: initialShardId, + }) + arg := createDefaultValidatorsProviderArg() - validatorProc := &mock.ValidatorStatisticsProcessorStub{ + validatorProc := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { 
return validatorsMap, nil } @@ -288,16 +321,15 @@ func TestValidatorsProvider_UpdateCache(t *testing.T) { cache: nil, cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: nil, - pubkeyConverter: testscommon.NewPubkeyConverterMock(32), + validatorPubKeyConverter: testscommon.NewPubkeyConverterMock(32), lock: sync.RWMutex{}, } vsp.updateCache() assert.NotNil(t, vsp.cache) - assert.Equal(t, len(validatorsMap[initialShardId]), len(vsp.cache)) - encodedKey, err := arg.PubKeyConverter.Encode(pk) - assert.Nil(t, err) + assert.Equal(t, len(validatorsMap.GetShardValidatorsInfoMap()[initialShardId]), len(vsp.cache)) + encodedKey, _ := arg.ValidatorPubKeyConverter.Encode(pk) assert.NotNil(t, vsp.cache[encodedKey]) assert.Equal(t, initialList, vsp.cache[encodedKey].ValidatorStatus) assert.Equal(t, initialShardId, vsp.cache[encodedKey].ShardId) @@ -315,12 +347,9 @@ func TestValidatorsProvider_aggregatePType_equal(t *testing.T) { trieLeavingShardId := uint32(2) leavingList := string(common.LeavingList) - encodedEligible, err := pubKeyConverter.Encode(pkEligible) - assert.Nil(t, err) - encondedInactive, err := pubKeyConverter.Encode(pkInactive) - assert.Nil(t, err) - encodedLeaving, err := pubKeyConverter.Encode(pkLeaving) - assert.Nil(t, err) + encodedEligible, _ := pubKeyConverter.Encode(pkEligible) + encondedInactive, _ := pubKeyConverter.Encode(pkInactive) + encodedLeaving, _ := pubKeyConverter.Encode(pkLeaving) cache := make(map[string]*validator.ValidatorStatistics) cache[encondedInactive] = &validator.ValidatorStatistics{ValidatorStatus: inactiveList, ShardId: trieInctiveShardId} cache[encodedEligible] = &validator.ValidatorStatistics{ValidatorStatus: eligibleList, ShardId: trieEligibleShardId} @@ -335,7 +364,7 @@ func TestValidatorsProvider_aggregatePType_equal(t *testing.T) { } vp := validatorsProvider{ - pubkeyConverter: pubKeyConverter, + validatorPubKeyConverter: pubKeyConverter, } vp.aggregateLists(cache, validatorsMap, common.EligibleList) @@ -363,47 +392,41 @@ func TestValidatorsProvider_createCache(t *testing.T) { pkNew := []byte("pk5") newList := string(common.NewList) - validatorsMap := make(map[uint32][]*state.ValidatorInfo) + validatorsMap := state.NewShardValidatorsInfoMap() eligibleShardId := uint32(0) waitingShardId := uint32(1) leavingShardId := uint32(2) inactiveShardId := uint32(3) newShardId := core.MetachainShardId - validatorsMap[eligibleShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkEligible, - ShardId: eligibleShardId, - List: eligibleList, - }, - } - validatorsMap[waitingShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkWaiting, - ShardId: waitingShardId, - List: waitingList, - }, - } - validatorsMap[leavingShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkLeaving, - ShardId: leavingShardId, - List: leavingList, - }, - } - validatorsMap[inactiveShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkInactive, - ShardId: inactiveShardId, - List: inactiveList, - }, - } - validatorsMap[newShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkNew, - ShardId: newShardId, - List: newList, - }, - } + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkEligible, + ShardId: eligibleShardId, + List: eligibleList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkWaiting, + ShardId: waitingShardId, + List: waitingList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkLeaving, + ShardId: leavingShardId, + List: leavingList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: 
pkInactive, + ShardId: inactiveShardId, + List: inactiveList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkNew, + ShardId: newShardId, + List: newList, + }) arg := createDefaultValidatorsProviderArg() pubKeyConverter := testscommon.NewPubkeyConverterMock(32) vsp := validatorsProvider{ @@ -411,7 +434,7 @@ func TestValidatorsProvider_createCache(t *testing.T) { validatorStatistics: arg.ValidatorStatistics, cache: nil, cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, - pubkeyConverter: pubKeyConverter, + validatorPubKeyConverter: pubKeyConverter, lock: sync.RWMutex{}, } @@ -419,26 +442,22 @@ func TestValidatorsProvider_createCache(t *testing.T) { assert.NotNil(t, cache) - encodedPkEligible, err := pubKeyConverter.Encode(pkEligible) - assert.Nil(t, err) + encodedPkEligible, _ := pubKeyConverter.Encode(pkEligible) assert.NotNil(t, cache[encodedPkEligible]) assert.Equal(t, eligibleList, cache[encodedPkEligible].ValidatorStatus) assert.Equal(t, eligibleShardId, cache[encodedPkEligible].ShardId) - encodedPkWaiting, err := pubKeyConverter.Encode(pkWaiting) - assert.Nil(t, err) + encodedPkWaiting, _ := pubKeyConverter.Encode(pkWaiting) assert.NotNil(t, cache[encodedPkWaiting]) assert.Equal(t, waitingList, cache[encodedPkWaiting].ValidatorStatus) assert.Equal(t, waitingShardId, cache[encodedPkWaiting].ShardId) - encodedPkLeaving, err := pubKeyConverter.Encode(pkLeaving) - assert.Nil(t, err) + encodedPkLeaving, _ := pubKeyConverter.Encode(pkLeaving) assert.NotNil(t, cache[encodedPkLeaving]) assert.Equal(t, leavingList, cache[encodedPkLeaving].ValidatorStatus) assert.Equal(t, leavingShardId, cache[encodedPkLeaving].ShardId) - encodedPkNew, err := pubKeyConverter.Encode(pkNew) - assert.Nil(t, err) + encodedPkNew, _ := pubKeyConverter.Encode(pkNew) assert.NotNil(t, cache[encodedPkNew]) assert.Equal(t, newList, cache[encodedPkNew].ValidatorStatus) assert.Equal(t, newShardId, cache[encodedPkNew].ShardId) @@ -452,31 +471,25 @@ func TestValidatorsProvider_createCache_combined(t *testing.T) { pkLeavingInTrie := []byte("pk3") leavingList := string(common.LeavingList) - validatorsMap := make(map[uint32][]*state.ValidatorInfo) + validatorsMap := state.NewShardValidatorsInfoMap() eligibleShardId := uint32(0) inactiveShardId := uint32(1) leavingShardId := uint32(2) - validatorsMap[eligibleShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkEligibleInTrie, - ShardId: eligibleShardId, - List: eligibleList, - }, - } - validatorsMap[inactiveShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkInactive, - ShardId: inactiveShardId, - List: inactiveList, - }, - } - validatorsMap[leavingShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkLeavingInTrie, - ShardId: leavingShardId, - List: leavingList, - }, - } + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkEligibleInTrie, + ShardId: eligibleShardId, + List: eligibleList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkInactive, + ShardId: inactiveShardId, + List: inactiveList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkLeavingInTrie, + ShardId: leavingShardId, + List: leavingList, + }) arg := createDefaultValidatorsProviderArg() nodesCoordinator := shardingMocks.NewNodesCoordinatorMock() nodesCoordinatorEligibleShardId := uint32(5) @@ -491,7 +504,7 @@ func TestValidatorsProvider_createCache_combined(t *testing.T) { vsp := validatorsProvider{ nodesCoordinator: nodesCoordinator, validatorStatistics: arg.ValidatorStatistics, - pubkeyConverter: arg.PubKeyConverter, + 
validatorPubKeyConverter: arg.ValidatorPubKeyConverter, cache: nil, cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, lock: sync.RWMutex{}, @@ -499,14 +512,12 @@ func TestValidatorsProvider_createCache_combined(t *testing.T) { cache := vsp.createNewCache(0, validatorsMap) - encodedPkEligible, err := arg.PubKeyConverter.Encode(pkEligibleInTrie) - assert.Nil(t, err) + encodedPkEligible, _ := arg.ValidatorPubKeyConverter.Encode(pkEligibleInTrie) assert.NotNil(t, cache[encodedPkEligible]) assert.Equal(t, eligibleList, cache[encodedPkEligible].ValidatorStatus) assert.Equal(t, nodesCoordinatorEligibleShardId, cache[encodedPkEligible].ShardId) - encodedPkLeavingInTrie, err := arg.PubKeyConverter.Encode(pkLeavingInTrie) - assert.Nil(t, err) + encodedPkLeavingInTrie, _ := arg.ValidatorPubKeyConverter.Encode(pkLeavingInTrie) computedPeerType := fmt.Sprintf(common.CombinedPeerType, common.EligibleList, common.LeavingList) assert.NotNil(t, cache[encodedPkLeavingInTrie]) assert.Equal(t, computedPeerType, cache[encodedPkLeavingInTrie].ValidatorStatus) @@ -519,14 +530,14 @@ func TestValidatorsProvider_CallsPopulateOnlyAfterTimeout(t *testing.T) { arg := createDefaultValidatorsProviderArg() arg.CacheRefreshIntervalDurationInSec = time.Millisecond * 10 - validatorStatisticsProcessor := &mock.ValidatorStatisticsProcessorStub{ + validatorStatisticsProcessor := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { atomic.AddInt32(populateCacheCalled, 1) - return nil, nil + return state.NewShardValidatorsInfoMap(), nil } arg.ValidatorStatistics = validatorStatisticsProcessor @@ -560,31 +571,29 @@ func TestValidatorsProvider_CallsUpdateCacheOnEpochChange(t *testing.T) { arg.CacheRefreshIntervalDurationInSec = 5 * time.Millisecond pkEligibleInTrie := []byte("pk1") - validatorStatisticsProcessor := &mock.ValidatorStatisticsProcessorStub{ + validatorStatisticsProcessor := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { callNumber++ // first call comes from the constructor if callNumber == 1 { - return nil, nil + return state.NewShardValidatorsInfoMap(), nil } - return map[uint32][]*state.ValidatorInfo{ - 0: { - { - PublicKey: pkEligibleInTrie, - List: string(common.EligibleList), - }, - }, - }, nil + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{ + ShardId: 0, + PublicKey: pkEligibleInTrie, + List: string(common.EligibleList), + }) + return validatorsMap, nil } arg.ValidatorStatistics = validatorStatisticsProcessor vsp, _ := NewValidatorsProvider(arg) - encodedEligible, err := arg.PubKeyConverter.Encode(pkEligibleInTrie) - assert.Nil(t, err) + encodedEligible, _ := arg.ValidatorPubKeyConverter.Encode(pkEligibleInTrie) assert.Equal(t, 0, len(vsp.GetCache())) // nothing in cache epochStartNotifier.NotifyAll(&block.Header{Nonce: 1, ShardID: 2, Round: 3}) 
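// The NotifyAll above presumably reaches the provider's epoch-start handler, which feeds
// vp.refreshCache; startRefreshProcess consumes it and rebuilds the cache asynchronously
// under lock, so the sleep below gives that goroutine time to finish before the assertions.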
time.Sleep(arg.CacheRefreshIntervalDurationInSec) @@ -600,31 +609,29 @@ func TestValidatorsProvider_DoesntCallUpdateUpdateCacheWithoutRequests(t *testin arg.CacheRefreshIntervalDurationInSec = 5 * time.Millisecond pkEligibleInTrie := []byte("pk1") - validatorStatisticsProcessor := &mock.ValidatorStatisticsProcessorStub{ + validatorStatisticsProcessor := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { callNumber++ // first call comes from the constructor if callNumber == 1 { - return nil, nil + return state.NewShardValidatorsInfoMap(), nil } - return map[uint32][]*state.ValidatorInfo{ - 0: { - { - PublicKey: pkEligibleInTrie, - List: string(common.EligibleList), - }, - }, - }, nil + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{ + ShardId: 0, + PublicKey: pkEligibleInTrie, + List: string(common.EligibleList), + }) + return validatorsMap, nil } arg.ValidatorStatistics = validatorStatisticsProcessor vsp, _ := NewValidatorsProvider(arg) - encodedEligible, err := arg.PubKeyConverter.Encode(pkEligibleInTrie) - assert.Nil(t, err) + encodedEligible, _ := arg.ValidatorPubKeyConverter.Encode(pkEligibleInTrie) assert.Equal(t, 0, len(vsp.GetCache())) // nothing in cache time.Sleep(arg.CacheRefreshIntervalDurationInSec) assert.Equal(t, 0, len(vsp.GetCache())) // nothing in cache @@ -636,6 +643,492 @@ func TestValidatorsProvider_DoesntCallUpdateUpdateCacheWithoutRequests(t *testin assert.Equal(t, 1, len(resp)) assert.NotNil(t, vsp.GetCache()[encodedEligible]) } + +func TestValidatorsProvider_GetAuctionList(t *testing.T) { + t.Parallel() + + t.Run("error getting root hash", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return nil + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, state.ErrNilRootHash, err) + }) + + t.Run("error getting validators info for root hash", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + expectedErr := errors.New("local error") + expectedRootHash := []byte("root hash") + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + require.Equal(t, expectedRootHash, rootHash) + return nil, expectedErr + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, expectedErr, err) + }) + + t.Run("error filling validator info, staking data provider cache should be cleaned", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + cleanCalled := &coreAtomic.Flag{} + expectedValidator := &state.ValidatorInfo{PublicKey: []byte("pubKey"), List: string(common.AuctionList)} + expectedErr := errors.New("local error") + expectedRootHash := 
[]byte("root hash") + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + require.Equal(t, expectedRootHash, rootHash) + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(expectedValidator) + return validatorsMap, nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + FillValidatorInfoCalled: func(validator state.ValidatorInfoHandler) error { + require.Equal(t, expectedValidator, validator) + return expectedErr + }, + CleanCalled: func() { + cleanCalled.SetValue(true) + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, expectedErr, err) + require.True(t, cleanCalled.IsSet()) + }) + + t.Run("error selecting nodes from auction, staking data provider cache should be cleaned", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + cleanCalled := &coreAtomic.Flag{} + expectedErr := errors.New("local error") + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + return expectedErr + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + CleanCalled: func() { + cleanCalled.SetValue(true) + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, expectedErr, err) + require.True(t, cleanCalled.IsSet()) + }) + + t.Run("empty list, check normal flow is executed", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + expectedRootHash := []byte("root hash") + ctRootHashCalled := uint32(0) + ctGetValidatorsInfoForRootHash := uint32(0) + ctSelectNodesFromAuctionList := uint32(0) + ctFillValidatorInfoCalled := uint32(0) + ctGetOwnersDataCalled := uint32(0) + ctComputeUnqualifiedNodes := uint32(0) + + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + atomic.AddUint32(&ctRootHashCalled, 1) + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + atomic.AddUint32(&ctGetValidatorsInfoForRootHash, 1) + require.Equal(t, expectedRootHash, rootHash) + return state.NewShardValidatorsInfoMap(), nil + }, + } + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + atomic.AddUint32(&ctSelectNodesFromAuctionList, 1) + require.Equal(t, expectedRootHash, randomness) + return nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + FillValidatorInfoCalled: func(validator state.ValidatorInfoHandler) error { + atomic.AddUint32(&ctFillValidatorInfoCalled, 1) + return nil + }, + GetOwnersDataCalled: func() map[string]*epochStart.OwnerData { + atomic.AddUint32(&ctGetOwnersDataCalled, 1) + return nil + }, + ComputeUnQualifiedNodesCalled: func(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { + atomic.AddUint32(&ctComputeUnqualifiedNodes, 1) + 
return nil, nil, nil + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, err) + require.Empty(t, list) + require.Equal(t, ctRootHashCalled, uint32(2)) // another call is from constructor in startRefreshProcess.updateCache + require.Equal(t, ctGetValidatorsInfoForRootHash, uint32(2)) // another call is from constructor in startRefreshProcess.updateCache + require.Equal(t, ctFillValidatorInfoCalled, uint32(0)) + require.Equal(t, ctGetOwnersDataCalled, uint32(1)) + require.Equal(t, ctComputeUnqualifiedNodes, uint32(1)) + require.Equal(t, expectedRootHash, vp.cachedRandomness) + }) + + t.Run("normal flow, check data is correctly computed", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1"), List: string(common.AuctionList)} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2"), List: string(common.AuctionList)} + v3 := &state.ValidatorInfo{PublicKey: []byte("pk3"), List: string(common.AuctionList)} + v4 := &state.ValidatorInfo{PublicKey: []byte("pk4"), List: string(common.AuctionList)} + v5 := &state.ValidatorInfo{PublicKey: []byte("pk5"), List: string(common.AuctionList)} + v6 := &state.ValidatorInfo{PublicKey: []byte("pk6"), List: string(common.AuctionList)} + v7 := &state.ValidatorInfo{PublicKey: []byte("pk7"), List: string(common.EligibleList)} + v8 := &state.ValidatorInfo{PublicKey: []byte("pk8"), List: string(common.WaitingList)} + v9 := &state.ValidatorInfo{PublicKey: []byte("pk9"), List: string(common.LeavingList)} + v10 := &state.ValidatorInfo{PublicKey: []byte("pk10"), List: string(common.JailedList)} + v11 := &state.ValidatorInfo{PublicKey: []byte("pk11"), List: string(common.AuctionList)} + v12 := &state.ValidatorInfo{PublicKey: []byte("pk12"), List: string(common.AuctionList)} + + owner1 := "owner1" + owner2 := "owner2" + owner3 := "owner3" + owner4 := "owner4" + owner5 := "owner5" + owner6 := "owner6" + owner7 := "owner7" + ownersData := map[string]*epochStart.OwnerData{ + owner1: { + NumStakedNodes: 3, + NumActiveNodes: 1, + TotalTopUp: big.NewInt(7500), + TopUpPerNode: big.NewInt(2500), + AuctionList: []state.ValidatorInfoHandler{v1, v2}, // owner1 will have v1 & v2 selected + Qualified: true, // with qualifiedTopUp = 2500 + }, + owner2: { + NumStakedNodes: 3, + NumActiveNodes: 1, + TotalTopUp: big.NewInt(3000), + TopUpPerNode: big.NewInt(1000), + AuctionList: []state.ValidatorInfoHandler{v3, v4}, // owner2 will have v3 selected + Qualified: true, // with qualifiedTopUp = 1500 + }, + owner3: { + NumStakedNodes: 2, + NumActiveNodes: 0, + TotalTopUp: big.NewInt(4000), + TopUpPerNode: big.NewInt(2000), + AuctionList: []state.ValidatorInfoHandler{v5, v6}, // owner3 will have v5 selected + Qualified: true, // with qualifiedTopUp = 4000 + }, + owner4: { + NumStakedNodes: 3, + NumActiveNodes: 2, + TotalTopUp: big.NewInt(0), + TopUpPerNode: big.NewInt(0), + AuctionList: []state.ValidatorInfoHandler{v7}, // owner4 has one node in auction, but is not qualified + Qualified: false, // should be sent at the bottom of the list + }, + owner5: { + NumStakedNodes: 5, + NumActiveNodes: 5, + TotalTopUp: big.NewInt(5000), + TopUpPerNode: big.NewInt(1000), + AuctionList: []state.ValidatorInfoHandler{}, // owner5 has no nodes in auction, will not appear in API list + Qualified: true, + }, + // owner6 has same stats as owner7. 
After selection, owner7 will have its node selected => should be listed above owner 6 + owner6: { + NumStakedNodes: 1, + NumActiveNodes: 0, + TotalTopUp: big.NewInt(0), + TopUpPerNode: big.NewInt(0), + AuctionList: []state.ValidatorInfoHandler{v11}, + Qualified: true, // should be added + }, + owner7: { + NumStakedNodes: 1, + NumActiveNodes: 0, + TotalTopUp: big.NewInt(0), + TopUpPerNode: big.NewInt(0), + AuctionList: []state.ValidatorInfoHandler{v12}, + Qualified: true, + }, + } + + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(v1) + _ = validatorsMap.Add(v2) + _ = validatorsMap.Add(v3) + _ = validatorsMap.Add(v4) + _ = validatorsMap.Add(v5) + _ = validatorsMap.Add(v6) + _ = validatorsMap.Add(v7) + _ = validatorsMap.Add(v8) + _ = validatorsMap.Add(v9) + _ = validatorsMap.Add(v10) + _ = validatorsMap.Add(v11) + _ = validatorsMap.Add(v12) + + rootHash := []byte("root hash") + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return rootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + return validatorsMap, nil + }, + } + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + selectedV1 := v1.ShallowClone() + selectedV1.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v1, selectedV1) + + selectedV2 := v2.ShallowClone() + selectedV2.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v2, selectedV2) + + selectedV3 := v3.ShallowClone() + selectedV3.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v3, selectedV3) + + selectedV5 := v5.ShallowClone() + selectedV5.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v5, selectedV5) + + selectedV12 := v12.ShallowClone() + selectedV12.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v12, selectedV12) + + return nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + GetOwnersDataCalled: func() map[string]*epochStart.OwnerData { + return ownersData + }, + } + + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + expectedList := []*common.AuctionListValidatorAPIResponse{ + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner3), log), + NumStakedNodes: 2, + TotalTopUp: "4000", + TopUpPerNode: "2000", + QualifiedTopUp: "4000", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v5.PublicKey, log), + Qualified: true, + }, + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v6.PublicKey, log), + Qualified: false, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner1), log), + NumStakedNodes: 3, + TotalTopUp: "7500", + TopUpPerNode: "2500", + QualifiedTopUp: "2500", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v1.PublicKey, log), + Qualified: true, + }, + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v2.PublicKey, log), + Qualified: true, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner2), log), + NumStakedNodes: 3, + TotalTopUp: "3000", + TopUpPerNode: "1000", + QualifiedTopUp: "1500", + Nodes: []*common.AuctionNode{ + { + BlsKey: 
args.ValidatorPubKeyConverter.SilentEncode(v3.PublicKey, log), + Qualified: true, + }, + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v4.PublicKey, log), + Qualified: false, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner7), log), + NumStakedNodes: 1, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v12.PublicKey, log), + Qualified: true, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner6), log), + NumStakedNodes: 1, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v11.PublicKey, log), + Qualified: false, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner4), log), + NumStakedNodes: 3, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v7.PublicKey, log), + Qualified: false, + }, + }, + }, + } + + list, err := vp.GetAuctionList() + require.Nil(t, err) + require.Equal(t, expectedList, list) + }) + + t.Run("concurrent calls should only update cache once", func(t *testing.T) { + t.Parallel() + + args := createDefaultValidatorsProviderArg() + + args.CacheRefreshIntervalDurationInSec = time.Second * 5 + + expectedRootHash := []byte("root hash") + ctRootHashCalled := uint32(0) + ctSelectNodesFromAuctionList := uint32(0) + ctFillValidatorInfoCalled := uint32(0) + ctGetOwnersDataCalled := uint32(0) + ctComputeUnqualifiedNodes := uint32(0) + + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + atomic.AddUint32(&ctRootHashCalled, 1) + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + require.Equal(t, expectedRootHash, rootHash) + return state.NewShardValidatorsInfoMap(), nil + }, + } + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + atomic.AddUint32(&ctSelectNodesFromAuctionList, 1) + require.Equal(t, expectedRootHash, randomness) + return nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + FillValidatorInfoCalled: func(validator state.ValidatorInfoHandler) error { + atomic.AddUint32(&ctFillValidatorInfoCalled, 1) + return nil + }, + GetOwnersDataCalled: func() map[string]*epochStart.OwnerData { + atomic.AddUint32(&ctGetOwnersDataCalled, 1) + return nil + }, + ComputeUnQualifiedNodesCalled: func(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { + atomic.AddUint32(&ctComputeUnqualifiedNodes, 1) + return nil, nil, nil + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + numCalls := 99 + wg := sync.WaitGroup{} + wg.Add(numCalls) + + for i := 0; i < numCalls; i++ { + go func(idx int) { + switch idx % 3 { + case 0: + list, err := vp.GetAuctionList() + require.NoError(t, err) + require.Empty(t, list) + case 1: + err := vp.ForceUpdate() + require.NoError(t, err) + case 2: + _ = vp.GetLatestValidators() + } + + wg.Done() + }(i) + } + + wg.Wait() + + // expectedMaxNumCalls is: + // - 1 from constructor + // - 1 from GetAuctionList, should not update second time + // - 1 from 
GetLatestValidators, should not update second time + // - 33 calls * 2 from ForceUpdate, calling it twice/call + expectedMaxNumCalls := uint32(1 + 1 + 1 + 66) + require.LessOrEqual(t, ctRootHashCalled, expectedMaxNumCalls) + + require.NoError(t, vp.Close()) + }) + +} + func createMockValidatorInfo() *state.ValidatorInfo { initialInfo := &state.ValidatorInfo{ PublicKey: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), @@ -675,13 +1168,16 @@ func createDefaultValidatorsProviderArg() ArgValidatorsProvider { NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, StartEpoch: 1, EpochStartEventNotifier: &mock.EpochStartNotifierStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, CacheRefreshIntervalDurationInSec: 1 * time.Millisecond, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorStub{ + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, }, - MaxRating: 100, - PubKeyConverter: testscommon.NewPubkeyConverterMock(32), + MaxRating: 100, + ValidatorPubKeyConverter: testscommon.NewPubkeyConverterMock(32), + AddressPubKeyConverter: testscommon.NewPubkeyConverterMock(32), + AuctionListSelector: &stakingcommon.AuctionListSelectorStub{}, } } diff --git a/process/rating/chance.go b/process/rating/chance.go index 8ad3c092cec..71233ba3d3e 100644 --- a/process/rating/chance.go +++ b/process/rating/chance.go @@ -9,17 +9,17 @@ type selectionChance struct { chancePercentage uint32 } -//GetMaxThreshold returns the maxThreshold until this ChancePercentage holds +// GetMaxThreshold returns the maxThreshold until this ChancePercentage holds func (bsr *selectionChance) GetMaxThreshold() uint32 { return bsr.maxThreshold } -//GetChancePercentage returns the percentage for the RatingChance +// GetChancePercentage returns the percentage for the RatingChance func (bsr *selectionChance) GetChancePercentage() uint32 { return bsr.chancePercentage } -//IsInterfaceNil verifies if the interface is nil +// IsInterfaceNil verifies if the interface is nil func (bsr *selectionChance) IsInterfaceNil() bool { return bsr == nil } diff --git a/process/rating/disabledRatingReader.go b/process/rating/disabledRatingReader.go index 8b7ac6662c1..b57f06b2dca 100644 --- a/process/rating/disabledRatingReader.go +++ b/process/rating/disabledRatingReader.go @@ -10,17 +10,17 @@ func NewDisabledRatingReader(startRating uint32) *disabledRatingReader { return &disabledRatingReader{startRating: startRating} } -//GetRating gets the rating for the public key +// GetRating gets the rating for the public key func (rr *disabledRatingReader) GetRating(string) uint32 { return rr.startRating } -//UpdateRatingFromTempRating sets the new rating to the value of the tempRating +// UpdateRatingFromTempRating sets the new rating to the value of the tempRating func (rr *disabledRatingReader) UpdateRatingFromTempRating([]string) error { return nil } -//IsInterfaceNil verifies if the interface is nil +// IsInterfaceNil verifies if the interface is nil func (rr *disabledRatingReader) IsInterfaceNil() bool { return rr == nil } diff --git a/process/scToProtocol/stakingToPeer.go b/process/scToProtocol/stakingToPeer.go index af8bc00d688..363a7975a7a 100644 --- a/process/scToProtocol/stakingToPeer.go +++ b/process/scToProtocol/stakingToPeer.go @@ -11,14 +11,15 @@ import ( "github.com/multiversx/mx-chain-core-go/data/smartContractResult" 
"github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-logger-go" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" - "github.com/multiversx/mx-chain-logger-go" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) var _ process.SmartContractToProtocolHandler = (*stakingToPeer)(nil) @@ -109,6 +110,7 @@ func checkIfNil(args ArgStakingToPeer) error { return core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.StakeFlag, common.ValidatorToDelegationFlag, + common.UnJailCleanupFlag, }) } @@ -230,16 +232,17 @@ func (stp *stakingToPeer) updatePeerStateV1( isValidator := account.GetList() == string(common.EligibleList) || account.GetList() == string(common.WaitingList) isJailed := stakingData.JailedNonce >= stakingData.UnJailedNonce && stakingData.JailedNonce > 0 + isStakingV4Started := stp.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) if !isJailed { if stakingData.StakedNonce == nonce && !isValidator { - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.RegisterNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.RegisterNonce), isStakingV4Started) account.SetTempRating(stp.startRating) account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) } if stakingData.UnStakedNonce == nonce && account.GetList() != string(common.InactiveList) { - account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce), isStakingV4Started) account.SetUnStakedEpoch(stakingData.UnStakedEpoch) } } @@ -250,7 +253,7 @@ func (stp *stakingToPeer) updatePeerStateV1( } if !isValidator && account.GetUnStakedEpoch() == common.DefaultUnstakedEpoch { - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.UnJailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.UnJailedNonce), isStakingV4Started) } } @@ -276,11 +279,13 @@ func (stp *stakingToPeer) updatePeerState( return err } + isStakingV4Started := stp.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) + isUnJailForInactive := !isNew && !stakingData.Staked && stakingData.UnJailedNonce == nonce && account.GetList() == string(common.JailedList) if isUnJailForInactive { log.Debug("unJail for inactive node changed status to inactive list", "blsKey", account.AddressBytes(), "unStakedEpoch", stakingData.UnStakedEpoch) - account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce), isStakingV4Started) if account.GetTempRating() < stp.unJailRating { account.SetTempRating(stp.unJailRating) } @@ -313,18 +318,23 @@ func (stp *stakingToPeer) updatePeerState( log.Debug("new node", "blsKey", 
blsPubKey) } + newNodesList := common.NewList + if isStakingV4Started { + newNodesList = common.AuctionList + } + isValidator := account.GetList() == string(common.EligibleList) || account.GetList() == string(common.WaitingList) if !stakingData.Jailed { if stakingData.StakedNonce == nonce && !isValidator { - log.Debug("node is staked, changed status to new", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.StakedNonce)) + log.Debug("node is staked, changed status to", "list", newNodesList, "blsKey", blsPubKey) + account.SetListAndIndex(account.GetShardId(), string(newNodesList), uint32(stakingData.StakedNonce), isStakingV4Started) account.SetTempRating(stp.startRating) account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) } if stakingData.UnStakedNonce == nonce && account.GetList() != string(common.InactiveList) { log.Debug("node is unStaked, changed status to leaving list", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce), isStakingV4Started) account.SetUnStakedEpoch(stakingData.UnStakedEpoch) } } @@ -333,24 +343,27 @@ func (stp *stakingToPeer) updatePeerState( if account.GetTempRating() < stp.unJailRating { log.Debug("node is unJailed, setting temp rating to start rating", "blsKey", blsPubKey) account.SetTempRating(stp.unJailRating) + if stp.enableEpochsHandler.IsFlagEnabled(common.UnJailCleanupFlag) { + account.SetConsecutiveProposerMisses(0) + } } isNewValidator := !isValidator && stakingData.Staked if isNewValidator { - log.Debug("node is unJailed and staked, changing status to new list", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.UnJailedNonce)) + log.Debug("node is unJailed and staked, changing status to", "list", newNodesList, "blsKey", blsPubKey) + account.SetListAndIndex(account.GetShardId(), string(newNodesList), uint32(stakingData.UnJailedNonce), isStakingV4Started) } if account.GetList() == string(common.JailedList) { log.Debug("node is unJailed and not staked, changing status to inactive list", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce), isStakingV4Started) account.SetUnStakedEpoch(stakingData.UnStakedEpoch) } } if stakingData.JailedNonce == nonce && account.GetList() != string(common.InactiveList) { log.Debug("node is jailed, setting status to leaving", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.JailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.JailedNonce), isStakingV4Started) account.SetTempRating(stp.jailRating) } diff --git a/process/scToProtocol/stakingToPeer_test.go b/process/scToProtocol/stakingToPeer_test.go index 4ac4a2fa081..a6f0d80bc1b 100644 --- a/process/scToProtocol/stakingToPeer_test.go +++ b/process/scToProtocol/stakingToPeer_test.go @@ -40,7 +40,7 @@ func createMockArgumentsNewStakingToPeer() ArgStakingToPeer { Marshalizer: &mock.MarshalizerStub{}, PeerState: &stateMock.AccountsStub{}, BaseState: &stateMock.AccountsStub{}, - ArgParser: &mock.ArgumentParserMock{}, + ArgParser: &testscommon.ArgumentParserMock{}, 
CurrTxs: &mock.TxForCurrentBlockStub{}, RatingsData: &mock.RatingsInfoMock{}, EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakeFlag, common.ValidatorToDelegationFlag), @@ -227,7 +227,7 @@ func TestStakingToPeer_UpdateProtocolCannotGetStorageUpdatesShouldErr(t *testing }, nil } - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} argParser.GetStorageUpdatesCalled = func(data string) (updates []*vmcommon.StorageUpdate, e error) { return nil, testError } @@ -252,7 +252,7 @@ func TestStakingToPeer_UpdateProtocolRemoveAccountShouldReturnNil(t *testing.T) }, nil } - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} argParser.GetStorageUpdatesCalled = func(data string) (updates []*vmcommon.StorageUpdate, e error) { return []*vmcommon.StorageUpdate{ {Offset: []byte("aabbcc"), Data: []byte("data1")}, @@ -311,7 +311,7 @@ func TestStakingToPeer_UpdateProtocolCannotSetRewardAddressShouldErr(t *testing. offset = append(offset, 99) } - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} argParser.GetStorageUpdatesCalled = func(data string) (updates []*vmcommon.StorageUpdate, e error) { return []*vmcommon.StorageUpdate{ {Offset: offset, Data: []byte("data1")}, @@ -368,7 +368,7 @@ func TestStakingToPeer_UpdateProtocolEmptyDataShouldNotAddToTrie(t *testing.T) { offset = append(offset, 99) } - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} argParser.GetStorageUpdatesCalled = func(data string) (updates []*vmcommon.StorageUpdate, e error) { return []*vmcommon.StorageUpdate{ {Offset: offset, Data: []byte("data1")}, @@ -429,7 +429,7 @@ func TestStakingToPeer_UpdateProtocolCannotSaveAccountShouldErr(t *testing.T) { offset = append(offset, 99) } - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} argParser.GetStorageUpdatesCalled = func(data string) (updates []*vmcommon.StorageUpdate, e error) { return []*vmcommon.StorageUpdate{ {Offset: offset, Data: []byte("data1")}, @@ -492,7 +492,7 @@ func TestStakingToPeer_UpdateProtocolCannotSaveAccountNonceShouldErr(t *testing. 
offset = append(offset, 99) } - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} argParser.GetStorageUpdatesCalled = func(data string) (updates []*vmcommon.StorageUpdate, e error) { return []*vmcommon.StorageUpdate{ {Offset: offset, Data: []byte("data1")}, @@ -554,7 +554,7 @@ func TestStakingToPeer_UpdateProtocol(t *testing.T) { offset = append(offset, 99) } - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} argParser.GetStorageUpdatesCalled = func(data string) (updates []*vmcommon.StorageUpdate, e error) { return []*vmcommon.StorageUpdate{ {Offset: offset, Data: []byte("data1")}, @@ -617,7 +617,7 @@ func TestStakingToPeer_UpdateProtocolCannotSaveUnStakedNonceShouldErr(t *testing offset = append(offset, 99) } - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} argParser.GetStorageUpdatesCalled = func(data string) (updates []*vmcommon.StorageUpdate, e error) { return []*vmcommon.StorageUpdate{ {Offset: offset, Data: []byte("data1")}, @@ -673,8 +673,10 @@ func TestStakingToPeer_UpdatePeerState(t *testing.T) { }, } + enableEpochsHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakeFlag, common.ValidatorToDelegationFlag) arguments := createMockArgumentsNewStakingToPeer() arguments.PeerState = peerAccountsDB + arguments.EnableEpochsHandler = enableEpochsHandler stp, _ := NewStakingToPeer(arguments) stakingData := systemSmartContracts.StakedDataV2_0{ @@ -703,11 +705,19 @@ func TestStakingToPeer_UpdatePeerState(t *testing.T) { assert.True(t, bytes.Equal(stakingData.RewardAddress, peerAccount.GetRewardAddress())) assert.Equal(t, string(common.NewList), peerAccount.GetList()) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + err = stp.updatePeerState(stakingData, blsPubKey, nonce) + assert.NoError(t, err) + assert.True(t, bytes.Equal(blsPubKey, peerAccount.GetBLSPublicKey())) + assert.True(t, bytes.Equal(stakingData.RewardAddress, peerAccount.GetRewardAddress())) + assert.Equal(t, string(common.AuctionList), peerAccount.GetList()) + enableEpochsHandler.RemoveActiveFlags(common.StakingV4StartedFlag) + stakingData.UnStakedNonce = 11 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnStakedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) - peerAccount.SetListAndIndex(0, string(common.EligibleList), 5) + peerAccount.SetListAndIndex(0, string(common.EligibleList), 5, false) stakingData.JailedNonce = 12 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.JailedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) @@ -721,6 +731,12 @@ func TestStakingToPeer_UpdatePeerState(t *testing.T) { _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnJailedNonce) assert.Equal(t, string(common.NewList), peerAccount.GetList()) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + err = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnJailedNonce) + assert.NoError(t, err) + assert.Equal(t, string(common.AuctionList), peerAccount.GetList()) + enableEpochsHandler.RemoveActiveFlags(common.StakingV4StartedFlag) + stakingData.UnStakedNonce = 15 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnStakedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) @@ -769,7 +785,7 @@ func TestStakingToPeer_UnJailFromInactive(t *testing.T) { _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnStakedNonce) assert.Equal(t, 
string(common.LeavingList), peerAccount.GetList()) - peerAccount.SetListAndIndex(0, string(common.JailedList), 5) + peerAccount.SetListAndIndex(0, string(common.JailedList), 5, false) stakingData.UnJailedNonce = 14 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnJailedNonce) assert.Equal(t, string(common.InactiveList), peerAccount.GetList()) diff --git a/process/smartContract/builtInFunctions/factory_test.go b/process/smartContract/builtInFunctions/factory_test.go index abf71000038..81f62c7085a 100644 --- a/process/smartContract/builtInFunctions/factory_test.go +++ b/process/smartContract/builtInFunctions/factory_test.go @@ -88,6 +88,11 @@ func fillGasMapBuiltInCosts(value uint64) map[string]uint64 { gasMap["ESDTNFTAddUri"] = value gasMap["ESDTNFTUpdateAttributes"] = value gasMap["ESDTNFTMultiTransfer"] = value + gasMap["ESDTModifyRoyalties"] = value + gasMap["ESDTModifyCreator"] = value + gasMap["ESDTNFTRecreate"] = value + gasMap["ESDTNFTUpdate"] = value + gasMap["ESDTNFTSetNewURIs"] = value gasMap["SetGuardian"] = value gasMap["GuardAccount"] = value gasMap["TrieLoadPerNode"] = value @@ -168,7 +173,7 @@ func TestCreateBuiltInFunctionContainer(t *testing.T) { args := createMockArguments() builtInFuncFactory, err := CreateBuiltInFunctionsFactory(args) assert.Nil(t, err) - assert.Equal(t, 36, len(builtInFuncFactory.BuiltInFunctionContainer().Keys())) + assert.Equal(t, 42, len(builtInFuncFactory.BuiltInFunctionContainer().Keys())) err = builtInFuncFactory.SetPayableHandler(&testscommon.BlockChainHookStub{}) assert.Nil(t, err) diff --git a/process/smartContract/hooks/blockChainHook.go b/process/smartContract/hooks/blockChainHook.go index 827d08da435..6e590484992 100644 --- a/process/smartContract/hooks/blockChainHook.go +++ b/process/smartContract/hooks/blockChainHook.go @@ -525,7 +525,7 @@ func (bh *BlockChainHookImpl) processMaxBuiltInCounters(input *vmcommon.Contract // SaveNFTMetaDataToSystemAccount will save NFT meta-data to system account for the given transaction func (bh *BlockChainHookImpl) SaveNFTMetaDataToSystemAccount(tx data.TransactionHandler) error { - return bh.nftStorageHandler.SaveNFTMetaDataToSystemAccount(tx) + return bh.nftStorageHandler.SaveNFTMetaData(tx) } // GetShardOfAddress is the hook that returns the shard of a given address @@ -826,8 +826,7 @@ func (bh *BlockChainHookImpl) makeCompiledSCStorage() error { dbConfig := factory.GetDBFromConfig(bh.configSCStorage.DB) dbConfig.FilePath = path.Join(bh.workingDir, defaultCompiledSCPath, bh.configSCStorage.DB.FilePath) - dbConfigHandler := factory.NewDBConfigHandler(bh.configSCStorage.DB) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(bh.configSCStorage.DB) if err != nil { return err } diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 9d1c8bcd4f3..25031dcbf4a 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -18,6 +18,10 @@ import ( vmData "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + logger "github.com/multiversx/mx-chain-logger-go" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-vm-common-go/parsers" + "github.com/multiversx/mx-chain-go/common" 
"github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/smartContract/scrCommon" @@ -25,9 +29,6 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/vm" - logger "github.com/multiversx/mx-chain-logger-go" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/multiversx/mx-chain-vm-common-go/parsers" ) var _ process.SmartContractResultProcessor = (*scProcessor)(nil) @@ -180,7 +181,6 @@ func NewSmartContractProcessor(args scrCommon.ArgsNewSmartContractProcessor) (*s common.OptimizeGasUsedInCrossMiniBlocksFlag, common.OptimizeNFTStoreFlag, common.RemoveNonUpdatedStorageFlag, - common.BuiltInFunctionOnMetaFlag, common.BackwardCompSaveKeyValueFlag, common.ReturnDataToLastTransferFlagAfterEpoch, common.FixAsyncCallBackArgsListFlag, @@ -536,7 +536,7 @@ func (sc *scProcessor) finishSCExecution( return 0, err } - err = sc.scrForwarder.AddIntermediateTransactions(finalResults) + err = sc.scrForwarder.AddIntermediateTransactions(finalResults, txHash) if err != nil { log.Error("AddIntermediateTransactions error", "error", err.Error()) return 0, err @@ -869,7 +869,7 @@ func (sc *scProcessor) resolveFailedTransaction( } if _, ok := tx.(*transaction.Transaction); ok { - err = sc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{tx}) + err = sc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{tx}, txHash) if err != nil { return err } @@ -1437,7 +1437,7 @@ func (sc *scProcessor) processIfErrorWithAddedLogs( userErrorLog := createNewLogFromSCRIfError(scrIfError) if !sc.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) || !sc.isInformativeTxHandler(scrIfError) { - err = sc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrIfError}) + err = sc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrIfError}, txHash) if err != nil { return err } @@ -1576,7 +1576,7 @@ func (sc *scProcessor) processForRelayerWhenError( } if !sc.enableEpochsHandler.IsFlagEnabled(common.CleanUpInformativeSCRsFlag) || scrForRelayer.Value.Cmp(zero) > 0 { - err = sc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrForRelayer}) + err = sc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrForRelayer}, txHash) if err != nil { return nil, err } @@ -1814,7 +1814,7 @@ func (sc *scProcessor) doDeploySmartContract( return 0, err } - err = sc.scrForwarder.AddIntermediateTransactions(finalResults) + err = sc.scrForwarder.AddIntermediateTransactions(finalResults, txHash) if err != nil { log.Debug("AddIntermediate Transaction error", "error", err.Error()) return 0, err @@ -2823,7 +2823,7 @@ func (sc *scProcessor) ProcessSmartContractResult(scr *smartContractResult.Smart returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err case process.BuiltInFunctionCall: - if sc.shardCoordinator.SelfId() == core.MetachainShardId && !sc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag) { + if sc.shardCoordinator.SelfId() == core.MetachainShardId { returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err } @@ -2861,7 +2861,8 @@ func (sc *scProcessor) processSimpleSCR( if err != nil { return err } - if !isPayable && !bytes.Equal(scResult.RcvAddr, scResult.OriginalSender) { + 
isSenderMeta := sc.shardCoordinator.ComputeId(scResult.SndAddr) == core.MetachainShardId + if !isPayable && !bytes.Equal(scResult.RcvAddr, scResult.OriginalSender) && !isSenderMeta { return process.ErrAccountNotPayable } diff --git a/process/smartContract/processProxy/processProxy.go b/process/smartContract/processProxy/processProxy.go index d2408c36dfa..a36a5fbd4f4 100644 --- a/process/smartContract/processProxy/processProxy.go +++ b/process/smartContract/processProxy/processProxy.go @@ -50,29 +50,30 @@ func NewSmartContractProcessorProxy(args scrCommon.ArgsNewSmartContractProcessor proxy := &scProcessorProxy{ args: scrCommon.ArgsNewSmartContractProcessor{ - VmContainer: args.VmContainer, - ArgsParser: args.ArgsParser, - Hasher: args.Hasher, - Marshalizer: args.Marshalizer, - AccountsDB: args.AccountsDB, - BlockChainHook: args.BlockChainHook, - BuiltInFunctions: args.BuiltInFunctions, - PubkeyConv: args.PubkeyConv, - ShardCoordinator: args.ShardCoordinator, - ScrForwarder: args.ScrForwarder, - TxFeeHandler: args.TxFeeHandler, - EconomicsFee: args.EconomicsFee, - TxTypeHandler: args.TxTypeHandler, - GasHandler: args.GasHandler, - GasSchedule: args.GasSchedule, - TxLogsProcessor: args.TxLogsProcessor, - BadTxForwarder: args.BadTxForwarder, - EnableRoundsHandler: args.EnableRoundsHandler, - EnableEpochsHandler: args.EnableEpochsHandler, - EnableEpochs: args.EnableEpochs, - VMOutputCacher: args.VMOutputCacher, - WasmVMChangeLocker: args.WasmVMChangeLocker, - IsGenesisProcessing: args.IsGenesisProcessing, + VmContainer: args.VmContainer, + ArgsParser: args.ArgsParser, + Hasher: args.Hasher, + Marshalizer: args.Marshalizer, + AccountsDB: args.AccountsDB, + BlockChainHook: args.BlockChainHook, + BuiltInFunctions: args.BuiltInFunctions, + PubkeyConv: args.PubkeyConv, + ShardCoordinator: args.ShardCoordinator, + ScrForwarder: args.ScrForwarder, + TxFeeHandler: args.TxFeeHandler, + EconomicsFee: args.EconomicsFee, + TxTypeHandler: args.TxTypeHandler, + GasHandler: args.GasHandler, + GasSchedule: args.GasSchedule, + TxLogsProcessor: args.TxLogsProcessor, + BadTxForwarder: args.BadTxForwarder, + EnableRoundsHandler: args.EnableRoundsHandler, + EnableEpochsHandler: args.EnableEpochsHandler, + EnableEpochs: args.EnableEpochs, + VMOutputCacher: args.VMOutputCacher, + WasmVMChangeLocker: args.WasmVMChangeLocker, + IsGenesisProcessing: args.IsGenesisProcessing, + FailedTxLogsAccumulator: args.FailedTxLogsAccumulator, }, } if check.IfNil(epochNotifier) { @@ -169,11 +170,11 @@ func (proxy *scProcessorProxy) IsInterfaceNil() bool { } // EpochConfirmed is called whenever a new epoch is confirmed -func (proxy *scProcessorProxy) EpochConfirmed(_ uint32, _ uint64) { +func (proxy *scProcessorProxy) EpochConfirmed(epoch uint32, _ uint64) { proxy.mutRc.Lock() defer proxy.mutRc.Unlock() - if proxy.args.EnableEpochsHandler.IsFlagEnabled(common.SCProcessorV2Flag) { + if proxy.args.EnableEpochsHandler.IsFlagEnabledInEpoch(common.SCProcessorV2Flag, epoch) { proxy.setActiveProcessorV2() return } diff --git a/process/smartContract/processProxy/processProxy_test.go b/process/smartContract/processProxy/processProxy_test.go index ba0a9c1c0b8..98a56fd0f30 100644 --- a/process/smartContract/processProxy/processProxy_test.go +++ b/process/smartContract/processProxy/processProxy_test.go @@ -23,6 +23,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" 
"github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/multiversx/mx-chain-vm-common-go/builtInFunctions" @@ -39,7 +40,7 @@ func createMockSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractP return scrCommon.ArgsNewSmartContractProcessor{ VmContainer: &mock.VMContainerMock{}, - ArgsParser: &mock.ArgumentParserMock{}, + ArgsParser: &testscommon.ArgumentParserMock{}, Hasher: &hashingMocks.HasherMock{}, Marshalizer: &mock.MarshalizerMock{}, AccountsDB: &stateMock.AccountsStub{ @@ -76,9 +77,10 @@ func createMockSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractP return flag == common.SCDeployFlag }, }, - EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, - WasmVMChangeLocker: &sync.RWMutex{}, - VMOutputCacher: txcache.NewDisabledCache(), + EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, + WasmVMChangeLocker: &sync.RWMutex{}, + VMOutputCacher: txcache.NewDisabledCache(), + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } } @@ -129,7 +131,11 @@ func TestNewSmartContractProcessorProxy(t *testing.T) { t.Parallel() args := createMockSmartContractProcessorArguments() - args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCProcessorV2Flag) + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.SCProcessorV2Flag + }, + } proxy, err := NewSmartContractProcessorProxy(args, &epochNotifierMock.EpochNotifierStub{}) assert.False(t, check.IfNil(proxy)) diff --git a/process/smartContract/processProxy/testProcessProxy.go b/process/smartContract/processProxy/testProcessProxy.go index 31c6514814b..65e5d525565 100644 --- a/process/smartContract/processProxy/testProcessProxy.go +++ b/process/smartContract/processProxy/testProcessProxy.go @@ -28,29 +28,30 @@ type scProcessorTestProxy struct { func NewTestSmartContractProcessorProxy(args scrCommon.ArgsNewSmartContractProcessor, epochNotifier vmcommon.EpochNotifier) (*scProcessorTestProxy, error) { scProcessorTestProxy := &scProcessorTestProxy{ args: scrCommon.ArgsNewSmartContractProcessor{ - VmContainer: args.VmContainer, - ArgsParser: args.ArgsParser, - Hasher: args.Hasher, - Marshalizer: args.Marshalizer, - AccountsDB: args.AccountsDB, - BlockChainHook: args.BlockChainHook, - BuiltInFunctions: args.BuiltInFunctions, - PubkeyConv: args.PubkeyConv, - ShardCoordinator: args.ShardCoordinator, - ScrForwarder: args.ScrForwarder, - TxFeeHandler: args.TxFeeHandler, - EconomicsFee: args.EconomicsFee, - TxTypeHandler: args.TxTypeHandler, - GasHandler: args.GasHandler, - GasSchedule: args.GasSchedule, - TxLogsProcessor: args.TxLogsProcessor, - BadTxForwarder: args.BadTxForwarder, - EnableRoundsHandler: args.EnableRoundsHandler, - EnableEpochsHandler: args.EnableEpochsHandler, - EnableEpochs: args.EnableEpochs, - VMOutputCacher: args.VMOutputCacher, - WasmVMChangeLocker: args.WasmVMChangeLocker, - IsGenesisProcessing: args.IsGenesisProcessing, + VmContainer: args.VmContainer, + ArgsParser: args.ArgsParser, + Hasher: args.Hasher, + Marshalizer: args.Marshalizer, + AccountsDB: args.AccountsDB, + BlockChainHook: args.BlockChainHook, + BuiltInFunctions: 
args.BuiltInFunctions, + PubkeyConv: args.PubkeyConv, + ShardCoordinator: args.ShardCoordinator, + ScrForwarder: args.ScrForwarder, + TxFeeHandler: args.TxFeeHandler, + EconomicsFee: args.EconomicsFee, + TxTypeHandler: args.TxTypeHandler, + GasHandler: args.GasHandler, + GasSchedule: args.GasSchedule, + TxLogsProcessor: args.TxLogsProcessor, + BadTxForwarder: args.BadTxForwarder, + EnableRoundsHandler: args.EnableRoundsHandler, + EnableEpochsHandler: args.EnableEpochsHandler, + EnableEpochs: args.EnableEpochs, + VMOutputCacher: args.VMOutputCacher, + WasmVMChangeLocker: args.WasmVMChangeLocker, + IsGenesisProcessing: args.IsGenesisProcessing, + FailedTxLogsAccumulator: args.FailedTxLogsAccumulator, }, } @@ -145,11 +146,11 @@ func (proxy *scProcessorTestProxy) IsInterfaceNil() bool { } // EpochConfirmed is called whenever a new epoch is confirmed -func (proxy *scProcessorTestProxy) EpochConfirmed(_ uint32, _ uint64) { +func (proxy *scProcessorTestProxy) EpochConfirmed(epoch uint32, _ uint64) { proxy.mutRc.Lock() defer proxy.mutRc.Unlock() - if proxy.args.EnableEpochsHandler.IsFlagEnabled(common.SCProcessorV2Flag) { + if proxy.args.EnableEpochsHandler.IsFlagEnabledInEpoch(common.SCProcessorV2Flag, epoch) { proxy.setActiveProcessorV2() return } diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 014a1751495..29c1e082be3 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -13,6 +13,13 @@ import ( "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" vmData "github.com/multiversx/mx-chain-core-go/data/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-vm-common-go/builtInFunctions" + "github.com/multiversx/mx-chain-vm-common-go/parsers" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/enablers" "github.com/multiversx/mx-chain-go/common/forking" @@ -32,15 +39,10 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/testscommon/vmcommonMocks" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/multiversx/mx-chain-vm-common-go/builtInFunctions" - "github.com/multiversx/mx-chain-vm-common-go/parsers" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const setGuardianCost = 250000 @@ -83,7 +85,7 @@ func createMockSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractP return scrCommon.ArgsNewSmartContractProcessor{ VmContainer: &mock.VMContainerMock{}, - ArgsParser: &mock.ArgumentParserMock{}, + 
ArgsParser: &testscommon.ArgumentParserMock{}, Hasher: &hashingMocks.HasherMock{}, Marshalizer: &mock.MarshalizerMock{}, AccountsDB: &stateMock.AccountsStub{ @@ -114,11 +116,12 @@ func createMockSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractP GasHandler: &testscommon.GasHandlerStub{ SetGasRefundedCalled: func(gasRefunded uint64, hash []byte) {}, }, - GasSchedule: testscommon.NewGasScheduleNotifierMock(gasSchedule), - EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag), - WasmVMChangeLocker: &sync.RWMutex{}, - VMOutputCacher: txcache.NewDisabledCache(), + GasSchedule: testscommon.NewGasScheduleNotifierMock(gasSchedule), + EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCDeployFlag), + WasmVMChangeLocker: &sync.RWMutex{}, + VMOutputCacher: txcache.NewDisabledCache(), + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } } @@ -457,7 +460,7 @@ func TestGasScheduleChangeShouldWork(t *testing.T) { func TestScProcessor_DeploySmartContractBadParse(t *testing.T) { t.Parallel() - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = &mock.VMContainerMock{} arguments.ArgsParser = argParser @@ -631,13 +634,13 @@ func TestScProcessor_BuiltInCallSmartContractSenderFailed(t *testing.T) { scrAdded := false badTxAdded := false arguments.BadTxForwarder = &mock.IntermediateTransactionHandlerMock{ - AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler, key []byte) error { badTxAdded = true return nil }, } arguments.ScrForwarder = &mock.IntermediateTransactionHandlerMock{ - AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler, key []byte) error { scrAdded = true return nil }, @@ -887,7 +890,7 @@ func TestScProcessor_DeploySmartContractWrongTx(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -909,7 +912,7 @@ func TestScProcessor_DeploySmartContractNilTx(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -931,7 +934,7 @@ func TestScProcessor_DeploySmartContractNotEmptyDestinationAddress(t *testing.T) t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -954,7 +957,7 @@ func TestScProcessor_DeploySmartContractCalculateHashFails(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -986,7 +989,7 @@ func 
TestScProcessor_DeploySmartContractEconomicsFeeValidateFails(t *testing.T) expectedError := errors.New("expected error") vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1017,7 +1020,7 @@ func TestScProcessor_DeploySmartContractEconomicsFeeSaveAccountsFails(t *testing expectedError := errors.New("expected error") vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1380,7 +1383,7 @@ func TestScProcessor_DeploySmartContractAddIntermediateTxFails(t *testing.T) { arguments := createMockSmartContractProcessorArguments() arguments.ArgsParser = argParser arguments.ScrForwarder = &mock.IntermediateTransactionHandlerMock{ - AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler, key []byte) error { return expectedError }, } @@ -1415,7 +1418,7 @@ func TestScProcessor_DeploySmartContractComputeRewardsFails(t *testing.T) { arguments := createMockSmartContractProcessorArguments() arguments.ArgsParser = argParser arguments.ScrForwarder = &mock.IntermediateTransactionHandlerMock{ - AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler, key []byte) error { return expectedError }, } @@ -1476,7 +1479,7 @@ func TestScProcessor_ExecuteSmartContractTransactionNilTx(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1500,7 +1503,7 @@ func TestScProcessor_ExecuteSmartContractTransactionNilAccount(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1533,7 +1536,7 @@ func TestScProcessor_ExecuteSmartContractTransactionBadParser(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1565,7 +1568,7 @@ func TestScProcessor_ExecuteSmartContractTransactionVMRunError(t *testing.T) { t.Parallel() vmContainer := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vmContainer arguments.ArgsParser = argParser @@ -1702,7 +1705,7 @@ func TestScProcessor_ExecuteSmartContractTransaction(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} accntState := &stateMock.AccountsStub{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm @@ -1735,7 +1738,7 @@ func TestScProcessor_ExecuteSmartContractTransactionSaveLogCalled(t *testing.T) slCalled := false vm := 
&mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} accntState := &stateMock.AccountsStub{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm @@ -1772,7 +1775,7 @@ func TestScProcessor_CreateVMCallInputWrongCode(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1800,7 +1803,7 @@ func TestScProcessor_CreateVMCallInput(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1824,7 +1827,7 @@ func TestScProcessor_CreateVMDeployBadCode(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1851,7 +1854,7 @@ func TestScProcessor_CreateVMDeployInput(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1915,7 +1918,7 @@ func TestScProcessor_CreateVMDeployInputWrongArgument(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1944,7 +1947,7 @@ func TestScProcessor_InitializeVMInputFromTx_ShouldErrNotEnoughGas(t *testing.T) t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1974,7 +1977,7 @@ func TestScProcessor_InitializeVMInputFromTx(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2011,7 +2014,7 @@ func TestScProcessor_processVMOutputNilSndAcc(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2040,7 +2043,7 @@ func TestScProcessor_processVMOutputNilDstAcc(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} accntState := &stateMock.AccountsStub{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm @@ -2084,7 +2087,7 @@ func TestScProcessor_GetAccountFromAddressAccNotFound(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() 
arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2115,7 +2118,7 @@ func TestScProcessor_GetAccountFromAddrFailedGetExistingAccount(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2147,7 +2150,7 @@ func TestScProcessor_GetAccountFromAddrAccNotInShard(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2180,7 +2183,7 @@ func TestScProcessor_GetAccountFromAddr(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2215,7 +2218,7 @@ func TestScProcessor_DeleteAccountsFailedAtRemove(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2250,7 +2253,7 @@ func TestScProcessor_DeleteAccountsNotInShard(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2289,7 +2292,7 @@ func TestScProcessor_DeleteAccountsInShard(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -3339,12 +3342,6 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test _, err = sc.ProcessSmartContractResult(&scr) require.Nil(t, err) require.True(t, executeCalled) - - executeCalled = false - enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionsFlag, common.BuiltInFunctionOnMetaFlag) - _, err = sc.ProcessSmartContractResult(&scr) - require.Nil(t, err) - require.False(t, executeCalled) } func TestScProcessor_ProcessRelayedSCRValueBackToRelayer(t *testing.T) { @@ -3749,7 +3746,7 @@ func TestSmartContractProcessor_computeTotalConsumedFeeAndDevRwdWithDifferentSCC feeHandler, err := economics.NewEconomicsData(*args) require.Nil(t, err) require.NotNil(t, feeHandler) - arguments.TxFeeHandler, _ = postprocess.NewFeeAccumulator() + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.EconomicsFee = feeHandler arguments.ShardCoordinator = shardCoordinator @@ -3836,9 +3833,7 @@ func TestSmartContractProcessor_finishSCExecutionV2(t *testing.T) { arguments.EconomicsFee, err = economics.NewEconomicsData(*args) require.Nil(t, err) - arguments.TxFeeHandler, err = postprocess.NewFeeAccumulator() - require.Nil(t, err) - + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.ShardCoordinator = shardCoordinator arguments.AccountsDB = &stateMock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { @@ -4253,8 +4248,7 @@ func createRealEconomicsDataArgs() *economics.ArgsNewEconomicsData { return flag == common.GasPriceModifierFlag }, }, - 
BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } } @@ -4404,7 +4398,7 @@ func TestScProcessor_CheckBuiltinFunctionIsExecutable(t *testing.T) { }) t.Run("", func(t *testing.T) { argsCopy := arguments - argsCopy.ArgsParser = &mock.ArgumentParserMock{ + argsCopy.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return "", nil, expectedErr }, @@ -4415,7 +4409,7 @@ func TestScProcessor_CheckBuiltinFunctionIsExecutable(t *testing.T) { }) t.Run("expected builtin function different than the parsed function name should return error", func(t *testing.T) { argsCopy := arguments - argsCopy.ArgsParser = &mock.ArgumentParserMock{ + argsCopy.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return "differentFunction", nil, nil }, @@ -4426,7 +4420,7 @@ func TestScProcessor_CheckBuiltinFunctionIsExecutable(t *testing.T) { }) t.Run("prepare gas provided with error should error", func(t *testing.T) { argsCopy := arguments - argsCopy.ArgsParser = &mock.ArgumentParserMock{ + argsCopy.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return "SetGuardian", nil, nil }, @@ -4444,7 +4438,7 @@ func TestScProcessor_CheckBuiltinFunctionIsExecutable(t *testing.T) { }) t.Run("builtin function not found should error", func(t *testing.T) { argsCopy := arguments - argsCopy.ArgsParser = &mock.ArgumentParserMock{ + argsCopy.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return "SetGuardian", nil, nil }, @@ -4465,7 +4459,7 @@ func TestScProcessor_CheckBuiltinFunctionIsExecutable(t *testing.T) { }) t.Run("builtin function not supporting executable check should error", func(t *testing.T) { argsCopy := arguments - argsCopy.ArgsParser = &mock.ArgumentParserMock{ + argsCopy.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return "SetGuardian", nil, nil }, @@ -4485,7 +4479,7 @@ func TestScProcessor_CheckBuiltinFunctionIsExecutable(t *testing.T) { }) t.Run("OK", func(t *testing.T) { argsCopy := arguments - argsCopy.ArgsParser = &mock.ArgumentParserMock{ + argsCopy.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return "SetGuardian", nil, nil }, diff --git a/process/smartContract/processorV2/processV2.go b/process/smartContract/processorV2/processV2.go index 938bfe725c3..dafa4f9712f 100644 --- a/process/smartContract/processorV2/processV2.go +++ b/process/smartContract/processorV2/processV2.go @@ -18,6 +18,12 @@ import ( vmData "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + logger "github.com/multiversx/mx-chain-logger-go" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-vm-common-go/parsers" + "github.com/multiversx/mx-chain-vm-go/vmhost" + "github.com/multiversx/mx-chain-vm-go/vmhost/contexts" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" 
"github.com/multiversx/mx-chain-go/process/smartContract/hooks" @@ -27,11 +33,6 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/vm" - logger "github.com/multiversx/mx-chain-logger-go" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/multiversx/mx-chain-vm-common-go/parsers" - "github.com/multiversx/mx-chain-vm-go/vmhost" - "github.com/multiversx/mx-chain-vm-go/vmhost/contexts" ) var _ process.SmartContractResultProcessor = (*scProcessor)(nil) @@ -80,13 +81,14 @@ type scProcessor struct { txTypeHandler process.TxTypeHandler gasHandler process.GasHandler - builtInGasCosts map[string]uint64 - persistPerByte uint64 - storePerByte uint64 - mutGasLock sync.RWMutex - txLogsProcessor process.TransactionLogProcessor - vmOutputCacher storage.Cacher - isGenesisProcessing bool + builtInGasCosts map[string]uint64 + persistPerByte uint64 + storePerByte uint64 + mutGasLock sync.RWMutex + txLogsProcessor process.TransactionLogProcessor + failedTxLogsAccumulator process.FailedTxLogsAccumulator + vmOutputCacher storage.Cacher + isGenesisProcessing bool executableCheckers map[string]scrCommon.ExecutableChecker mutExecutableCheckers sync.RWMutex @@ -160,12 +162,13 @@ func NewSmartContractProcessorV2(args scrCommon.ArgsNewSmartContractProcessor) ( if check.IfNil(args.TxLogsProcessor) { return nil, process.ErrNilTxLogsProcessor } + if check.IfNil(args.FailedTxLogsAccumulator) { + return nil, process.ErrNilFailedTxLogsAccumulator + } if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } - err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ - common.BuiltInFunctionOnMetaFlag, - }) + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{}) if err != nil { return nil, err } @@ -185,30 +188,31 @@ func NewSmartContractProcessorV2(args scrCommon.ArgsNewSmartContractProcessor) ( builtInFuncCost := args.GasSchedule.LatestGasSchedule()[common.BuiltInCost] baseOperationCost := args.GasSchedule.LatestGasSchedule()[common.BaseOperationCost] sc := &scProcessor{ - vmContainer: args.VmContainer, - argsParser: args.ArgsParser, - hasher: args.Hasher, - marshalizer: args.Marshalizer, - accounts: args.AccountsDB, - blockChainHook: args.BlockChainHook, - pubkeyConv: args.PubkeyConv, - shardCoordinator: args.ShardCoordinator, - scrForwarder: args.ScrForwarder, - txFeeHandler: args.TxFeeHandler, - economicsFee: args.EconomicsFee, - txTypeHandler: args.TxTypeHandler, - gasHandler: args.GasHandler, - builtInGasCosts: builtInFuncCost, - txLogsProcessor: args.TxLogsProcessor, - badTxForwarder: args.BadTxForwarder, - builtInFunctions: args.BuiltInFunctions, - isGenesisProcessing: args.IsGenesisProcessing, - arwenChangeLocker: args.WasmVMChangeLocker, - vmOutputCacher: args.VMOutputCacher, - enableEpochsHandler: args.EnableEpochsHandler, - storePerByte: baseOperationCost["StorePerByte"], - persistPerByte: baseOperationCost["PersistPerByte"], - executableCheckers: scrCommon.CreateExecutableCheckersMap(args.BuiltInFunctions), + vmContainer: args.VmContainer, + argsParser: args.ArgsParser, + hasher: args.Hasher, + marshalizer: args.Marshalizer, + accounts: args.AccountsDB, + blockChainHook: args.BlockChainHook, + pubkeyConv: args.PubkeyConv, + shardCoordinator: 
args.ShardCoordinator, + scrForwarder: args.ScrForwarder, + txFeeHandler: args.TxFeeHandler, + economicsFee: args.EconomicsFee, + txTypeHandler: args.TxTypeHandler, + gasHandler: args.GasHandler, + builtInGasCosts: builtInFuncCost, + txLogsProcessor: args.TxLogsProcessor, + failedTxLogsAccumulator: args.FailedTxLogsAccumulator, + badTxForwarder: args.BadTxForwarder, + builtInFunctions: args.BuiltInFunctions, + isGenesisProcessing: args.IsGenesisProcessing, + arwenChangeLocker: args.WasmVMChangeLocker, + vmOutputCacher: args.VMOutputCacher, + enableEpochsHandler: args.EnableEpochsHandler, + storePerByte: baseOperationCost["StorePerByte"], + persistPerByte: baseOperationCost["PersistPerByte"], + executableCheckers: scrCommon.CreateExecutableCheckersMap(args.BuiltInFunctions), } sc.esdtTransferParser, err = parsers.NewESDTTransferParser(args.Marshalizer) @@ -528,7 +532,7 @@ func (sc *scProcessor) finishSCExecution( return 0, err } - err = sc.scrForwarder.AddIntermediateTransactions(finalResults) + err = sc.scrForwarder.AddIntermediateTransactions(finalResults, txHash) if err != nil { log.Error("AddIntermediateTransactions error", "error", err.Error()) return 0, err @@ -826,10 +830,10 @@ func (sc *scProcessor) saveAccounts(acntSnd, acntDst vmcommon.AccountHandler) er func (sc *scProcessor) resolveFailedTransaction( _ state.UserAccountHandler, tx data.TransactionHandler, - _ []byte, + txHash []byte, ) error { if _, ok := tx.(*transaction.Transaction); ok { - err := sc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{tx}) + err := sc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{tx}, txHash) if err != nil { return err } @@ -1407,19 +1411,20 @@ func (sc *scProcessor) isCrossShardESDTTransfer(sender []byte, receiver []byte, func (sc *scProcessor) getOriginalTxHashIfIntraShardRelayedSCR( tx data.TransactionHandler, - txHash []byte) []byte { + txHash []byte, +) ([]byte, bool) { relayedSCR, isRelayed := isRelayedTx(tx) if !isRelayed { - return txHash + return txHash, isRelayed } sndShardID := sc.shardCoordinator.ComputeId(relayedSCR.SndAddr) rcvShardID := sc.shardCoordinator.ComputeId(relayedSCR.RcvAddr) if sndShardID != rcvShardID { - return txHash + return txHash, isRelayed } - return relayedSCR.OriginalTxHash + return relayedSCR.OriginalTxHash, isRelayed } // ProcessIfError creates a smart contract result, consumes the gas and returns the value to the user @@ -1489,7 +1494,7 @@ func (sc *scProcessor) processIfErrorWithAddedLogs(acntSnd state.UserAccountHand isRecvSelfShard := sc.shardCoordinator.SelfId() == sc.shardCoordinator.ComputeId(scrIfError.RcvAddr) if !isRecvSelfShard && !sc.isInformativeTxHandler(scrIfError) { - err = sc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrIfError}) + err = sc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrIfError}, failureContext.txHash) if err != nil { return err } @@ -1509,10 +1514,15 @@ func (sc *scProcessor) processIfErrorWithAddedLogs(acntSnd state.UserAccountHand processIfErrorLogs = append(processIfErrorLogs, failureContext.logs...) 
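A change that recurs across processV2.go and the forwarder mocks is that AddIntermediateTransactions now also receives the hash of the transaction that produced the intermediate results. A rough sketch of the reshaped call follows, with the forwarder reduced to the single method touched here (the real process.IntermediateTransactionHandler interface is larger); package and function names are illustrative only.

package sketch // illustration only

import "github.com/multiversx/mx-chain-core-go/data"

// resultsForwarder keeps just the reshaped method: intermediate transactions
// are now keyed by the hash of the originating transaction.
type resultsForwarder interface {
	AddIntermediateTransactions(txs []data.TransactionHandler, key []byte) error
}

// forwardResults mirrors how finishSCExecution and doDeploySmartContract call
// the forwarder after this change: the txHash travels along with the results.
func forwardResults(forwarder resultsForwarder, results []data.TransactionHandler, txHash []byte) error {
	return forwarder.AddIntermediateTransactions(results, txHash)
}

The test doubles change in the same way, exposing AddIntermediateTransactionsCalled func(txs []data.TransactionHandler, key []byte) error.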
} - logsTxHash := sc.getOriginalTxHashIfIntraShardRelayedSCR(tx, failureContext.txHash) - ignorableError := sc.txLogsProcessor.SaveLog(logsTxHash, tx, processIfErrorLogs) + logsTxHash, isRelayed := sc.getOriginalTxHashIfIntraShardRelayedSCR(tx, failureContext.txHash) + var ignorableError error + if isRelayed { + ignorableError = sc.failedTxLogsAccumulator.SaveLogs(logsTxHash, tx, processIfErrorLogs) + } else { + ignorableError = sc.txLogsProcessor.SaveLog(logsTxHash, tx, processIfErrorLogs) + } if ignorableError != nil { - log.Debug("scProcessor.ProcessIfError() txLogsProcessor.SaveLog()", "error", ignorableError.Error()) + log.Debug("scProcessor.ProcessIfError() save log", "error", ignorableError.Error(), "isRelayed", isRelayed) } txType, _ := sc.txTypeHandler.ComputeTransactionType(tx) @@ -1615,7 +1625,7 @@ func (sc *scProcessor) processForRelayerWhenError( } if scrForRelayer.Value.Cmp(zero) > 0 { - err = sc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrForRelayer}) + err = sc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrForRelayer}, txHash) if err != nil { return nil, err } @@ -1867,7 +1877,7 @@ func (sc *scProcessor) doDeploySmartContract( return 0, err } - err = sc.scrForwarder.AddIntermediateTransactions(finalResults) + err = sc.scrForwarder.AddIntermediateTransactions(finalResults, txHash) if err != nil { log.Debug("AddIntermediate Transaction error", "error", err.Error()) return 0, err @@ -2735,7 +2745,7 @@ func (sc *scProcessor) ProcessSmartContractResult(scr *smartContractResult.Smart returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err case process.BuiltInFunctionCall: - if sc.shardCoordinator.SelfId() == core.MetachainShardId && !sc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag) { + if sc.shardCoordinator.SelfId() == core.MetachainShardId { returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err } diff --git a/process/smartContract/processorV2/process_test.go b/process/smartContract/processorV2/process_test.go index 01a623cbe26..8722991d0a7 100644 --- a/process/smartContract/processorV2/process_test.go +++ b/process/smartContract/processorV2/process_test.go @@ -15,6 +15,14 @@ import ( "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" vmData "github.com/multiversx/mx-chain-core-go/data/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-vm-common-go/builtInFunctions" + "github.com/multiversx/mx-chain-vm-common-go/parsers" + "github.com/multiversx/mx-chain-vm-go/vmhost" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process" @@ -35,16 +43,10 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" stateMock 
"github.com/multiversx/mx-chain-go/testscommon/state" testsCommonStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/vmcommonMocks" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/multiversx/mx-chain-vm-common-go/builtInFunctions" - "github.com/multiversx/mx-chain-vm-common-go/parsers" - "github.com/multiversx/mx-chain-vm-go/vmhost" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const maxEpoch = math.MaxUint32 @@ -93,7 +95,7 @@ func createMockSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractP return scrCommon.ArgsNewSmartContractProcessor{ VmContainer: &mock.VMContainerMock{}, - ArgsParser: &mock.ArgumentParserMock{}, + ArgsParser: &testscommon.ArgumentParserMock{}, Hasher: &hashingMocks.HasherMock{}, Marshalizer: &mock.MarshalizerMock{}, AccountsDB: &stateMock.AccountsStub{ @@ -129,9 +131,10 @@ func createMockSmartContractProcessorArguments() scrCommon.ArgsNewSmartContractP return flag == common.SCDeployFlag }, }, - GasSchedule: testscommon.NewGasScheduleNotifierMock(gasSchedule), - WasmVMChangeLocker: &sync.RWMutex{}, - VMOutputCacher: txcache.NewDisabledCache(), + GasSchedule: testscommon.NewGasScheduleNotifierMock(gasSchedule), + WasmVMChangeLocker: &sync.RWMutex{}, + VMOutputCacher: txcache.NewDisabledCache(), + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } } @@ -334,6 +337,17 @@ func TestNewSmartContractProcessor_NilTxLogsProcessorShouldErr(t *testing.T) { require.Equal(t, process.ErrNilTxLogsProcessor, err) } +func TestNewSmartContractProcessor_NilFailedTxLogsAccumulatorShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockSmartContractProcessorArguments() + arguments.FailedTxLogsAccumulator = nil + sc, err := NewSmartContractProcessorV2(arguments) + + require.Nil(t, sc) + require.Equal(t, process.ErrNilFailedTxLogsAccumulator, err) +} + func TestNewSmartContractProcessor_NilBadTxForwarderShouldErr(t *testing.T) { t.Parallel() @@ -371,7 +385,6 @@ func TestNewSmartContractProcessorVerifyAllMembers(t *testing.T) { t.Parallel() arguments := createMockSmartContractProcessorArguments() - arguments.EnableEpochs.BuiltInFunctionOnMetaEnableEpoch = 10 sc, _ := NewSmartContractProcessorV2(arguments) assert.Equal(t, arguments.VmContainer, sc.vmContainer) @@ -439,7 +452,7 @@ func createTxLogsProcessor() process.TransactionLogProcessor { func TestScProcessor_DeploySmartContractBadParse(t *testing.T) { t.Parallel() - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = &mock.VMContainerMock{} arguments.ArgsParser = argParser @@ -554,13 +567,13 @@ func TestScProcessor_BuiltInCallSmartContractSenderFailed(t *testing.T) { scrAdded := false badTxAdded := false arguments.BadTxForwarder = &mock.IntermediateTransactionHandlerMock{ - AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler, key []byte) error { badTxAdded = true return nil }, } arguments.ScrForwarder = &mock.IntermediateTransactionHandlerMock{ - AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + AddIntermediateTransactionsCalled: func(txs 
[]data.TransactionHandler, key []byte) error { scrAdded = true return nil }, @@ -909,7 +922,7 @@ func TestScProcessor_DeploySmartContractWrongTx(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -931,7 +944,7 @@ func TestScProcessor_DeploySmartContractNilTx(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -953,7 +966,7 @@ func TestScProcessor_DeploySmartContractNotEmptyDestinationAddress(t *testing.T) t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -976,7 +989,7 @@ func TestScProcessor_DeploySmartContractCalculateHashFails(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1008,7 +1021,7 @@ func TestScProcessor_DeploySmartContractEconomicsFeeValidateFails(t *testing.T) expectedError := errors.New("expected error") vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1039,7 +1052,7 @@ func TestScProcessor_DeploySmartContractEconomicsFeeSaveAccountsFails(t *testing expectedError := errors.New("expected error") vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1402,7 +1415,7 @@ func TestScProcessor_DeploySmartContractAddIntermediateTxFails(t *testing.T) { arguments := createMockSmartContractProcessorArguments() arguments.ArgsParser = argParser arguments.ScrForwarder = &mock.IntermediateTransactionHandlerMock{ - AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler, key []byte) error { return expectedError }, } @@ -1437,7 +1450,7 @@ func TestScProcessor_DeploySmartContractComputeRewardsFails(t *testing.T) { arguments := createMockSmartContractProcessorArguments() arguments.ArgsParser = argParser arguments.ScrForwarder = &mock.IntermediateTransactionHandlerMock{ - AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler, key []byte) error { return expectedError }, } @@ -1498,7 +1511,7 @@ func TestScProcessor_ExecuteSmartContractTransactionNilTx(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1522,7 +1535,7 @@ func 
TestScProcessor_ExecuteSmartContractTransactionNilAccount(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1555,7 +1568,7 @@ func TestScProcessor_ExecuteSmartContractTransactionBadParser(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1587,7 +1600,7 @@ func TestScProcessor_ExecuteSmartContractTransactionVMRunError(t *testing.T) { t.Parallel() vmContainer := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vmContainer arguments.ArgsParser = argParser @@ -1724,7 +1737,7 @@ func TestScProcessor_ExecuteSmartContractTransaction(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} accntState := &stateMock.AccountsStub{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm @@ -1757,7 +1770,7 @@ func TestScProcessor_ExecuteSmartContractTransactionSaveLogCalled(t *testing.T) slCalled := false vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} accntState := &stateMock.AccountsStub{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm @@ -1794,7 +1807,7 @@ func TestScProcessor_CreateVMCallInputWrongCode(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1822,7 +1835,7 @@ func TestScProcessor_CreateVMCallInput(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1846,7 +1859,7 @@ func TestScProcessor_CreateVMDeployBadCode(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1873,7 +1886,7 @@ func TestScProcessor_CreateVMCallInputBadAsync(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1903,7 +1916,7 @@ func TestScProcessor_CreateVMDeployInput(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1967,7 +1980,7 @@ func TestScProcessor_CreateVMDeployInputWrongArgument(t *testing.T) { t.Parallel() vm := 
&mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -1996,7 +2009,7 @@ func TestScProcessor_InitializeVMInputFromTx_ShouldErrNotEnoughGas(t *testing.T) t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2026,7 +2039,7 @@ func TestScProcessor_InitializeVMInputFromTx(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2063,7 +2076,7 @@ func TestScProcessor_processVMOutputNilSndAcc(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2092,7 +2105,7 @@ func TestScProcessor_processVMOutputNilDstAcc(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} accntState := &stateMock.AccountsStub{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm @@ -2136,7 +2149,7 @@ func TestScProcessor_GetAccountFromAddressAccNotFound(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2167,7 +2180,7 @@ func TestScProcessor_GetAccountFromAddrFailedGetExistingAccount(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2199,7 +2212,7 @@ func TestScProcessor_GetAccountFromAddrAccNotInShard(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2232,7 +2245,7 @@ func TestScProcessor_GetAccountFromAddr(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2267,7 +2280,7 @@ func TestScProcessor_DeleteAccountsFailedAtRemove(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -2302,7 +2315,7 @@ func TestScProcessor_DeleteAccountsNotInShard(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm 
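The processIfErrorWithAddedLogs hunk above routes failure logs by transaction kind: relayed transactions now go to the new FailedTxLogsAccumulator via SaveLogs (keyed by the original transaction hash when the relayed SCR is intra-shard), while everything else keeps using TransactionLogProcessor.SaveLog; the ProcessRelayedSCRValueBackToRelayer test below asserts this through FailedTxLogsAccumulatorMock.SaveLogsCalled. A compact sketch of that decision follows, with both sinks trimmed to the single method used here (interface and function names are illustrative).

package sketch // illustration only

import (
	"github.com/multiversx/mx-chain-core-go/data"
	vmcommon "github.com/multiversx/mx-chain-vm-common-go"
)

// failedLogsAccumulator and logsProcessor are trimmed to the calls made in
// processIfErrorWithAddedLogs; the real interfaces live in the process package.
type failedLogsAccumulator interface {
	SaveLogs(txHash []byte, tx data.TransactionHandler, logs []*vmcommon.LogEntry) error
}

type logsProcessor interface {
	SaveLog(txHash []byte, tx data.TransactionHandler, logs []*vmcommon.LogEntry) error
}

// saveFailureLogs shows the routing: logs of relayed transactions are
// accumulated (presumably to be assembled per originating transaction later),
// the rest follow the previous path.
func saveFailureLogs(
	isRelayed bool,
	logsTxHash []byte,
	tx data.TransactionHandler,
	logs []*vmcommon.LogEntry,
	accumulator failedLogsAccumulator,
	processor logsProcessor,
) error {
	if isRelayed {
		return accumulator.SaveLogs(logsTxHash, tx, logs)
	}
	return processor.SaveLog(logsTxHash, tx, logs)
}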
arguments.ArgsParser = argParser @@ -2342,7 +2355,7 @@ func TestScProcessor_DeleteAccountsInShard(t *testing.T) { } vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm arguments.ArgsParser = argParser @@ -3273,12 +3286,6 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test _, err = sc.ProcessSmartContractResult(&scr) require.Nil(t, err) require.True(t, executeCalled) - - executeCalled = false - enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionOnMetaFlag) - _, err = sc.ProcessSmartContractResult(&scr) - require.Nil(t, err) - require.False(t, executeCalled) } func TestScProcessor_ProcessRelayedSCRValueBackToRelayer(t *testing.T) { @@ -3337,6 +3344,13 @@ func TestScProcessor_ProcessRelayedSCRValueBackToRelayer(t *testing.T) { return process.SCInvoking, process.SCInvoking }, } + wasSaveLogsCalled := false + arguments.FailedTxLogsAccumulator = &processMocks.FailedTxLogsAccumulatorMock{ + SaveLogsCalled: func(txHash []byte, tx data.TransactionHandler, logs []*vmcommon.LogEntry) error { + wasSaveLogsCalled = true + return nil + }, + } sc, err := NewSmartContractProcessorV2(arguments) require.NotNil(t, sc) require.Nil(t, err) @@ -3359,6 +3373,7 @@ func TestScProcessor_ProcessRelayedSCRValueBackToRelayer(t *testing.T) { userFinalValue := baseValue.Sub(baseValue, scr.Value) userFinalValue.Add(userFinalValue, userReturnValue) require.True(t, userAcc.GetBalance().Cmp(userFinalValue) == 0) + require.True(t, wasSaveLogsCalled) } func TestScProcessor_checkUpgradePermission(t *testing.T) { @@ -3704,7 +3719,7 @@ func TestSmartContractProcessor_computeTotalConsumedFeeAndDevRwdWithDifferentSCC feeHandler, err := economics.NewEconomicsData(*args) require.Nil(t, err) require.NotNil(t, feeHandler) - arguments.TxFeeHandler, _ = postprocess.NewFeeAccumulator() + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.EconomicsFee = feeHandler arguments.ShardCoordinator = shardCoordinator @@ -3790,9 +3805,7 @@ func TestSmartContractProcessor_finishSCExecutionV2(t *testing.T) { arguments.EconomicsFee, err = economics.NewEconomicsData(*args) require.Nil(t, err) - arguments.TxFeeHandler, err = postprocess.NewFeeAccumulator() - require.Nil(t, err) - + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.ShardCoordinator = shardCoordinator arguments.AccountsDB = &stateMock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { @@ -4070,18 +4083,20 @@ func TestProcessGetOriginalTxHashForRelayedIntraShard(t *testing.T) { scr := &smartContractResult.SmartContractResult{Value: big.NewInt(1), SndAddr: bytes.Repeat([]byte{1}, 32)} scrHash := []byte("hash") - logHash := sc.getOriginalTxHashIfIntraShardRelayedSCR(scr, scrHash) + logHash, isRelayed := sc.getOriginalTxHashIfIntraShardRelayedSCR(scr, scrHash) assert.Equal(t, scrHash, logHash) + assert.False(t, isRelayed) scr.OriginalTxHash = []byte("originalHash") scr.RelayerAddr = bytes.Repeat([]byte{1}, 32) scr.SndAddr = bytes.Repeat([]byte{1}, 32) scr.RcvAddr = bytes.Repeat([]byte{1}, 32) - logHash = sc.getOriginalTxHashIfIntraShardRelayedSCR(scr, scrHash) + logHash, isRelayed = sc.getOriginalTxHashIfIntraShardRelayedSCR(scr, scrHash) assert.Equal(t, scr.OriginalTxHash, logHash) + assert.True(t, isRelayed) scr.RcvAddr = bytes.Repeat([]byte{2}, 32) - logHash = sc.getOriginalTxHashIfIntraShardRelayedSCR(scr, scrHash) + logHash, _ = 
sc.getOriginalTxHashIfIntraShardRelayedSCR(scr, scrHash) assert.Equal(t, scrHash, logHash) } @@ -4191,8 +4206,7 @@ func createRealEconomicsDataArgs() *economics.ArgsNewEconomicsData { return flag == common.GasPriceModifierFlag }, }, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } } @@ -4353,7 +4367,7 @@ func TestSCProcessor_PrependAsyncParamsToData(t *testing.T) { func TestScProcessor_ForbidMultiLevelAsync(t *testing.T) { t.Parallel() vm := &mock.VMContainerMock{} - argParser := &mock.ArgumentParserMock{} + argParser := &testscommon.ArgumentParserMock{} accntState := &stateMock.AccountsStub{} arguments := createMockSmartContractProcessorArguments() arguments.VmContainer = vm diff --git a/process/smartContract/processorV2/vmInputV2.go b/process/smartContract/processorV2/vmInputV2.go index 35e68776907..06c4c3f0ad2 100644 --- a/process/smartContract/processorV2/vmInputV2.go +++ b/process/smartContract/processorV2/vmInputV2.go @@ -39,6 +39,12 @@ func (sc *scProcessor) initializeVMInputFromTx(vmInput *vmcommon.VMInput, tx dat vmInput.CallerAddr = tx.GetSndAddr() vmInput.CallValue = new(big.Int).Set(tx.GetValue()) vmInput.GasPrice = tx.GetGasPrice() + + relayedTx, isRelayed := isRelayedTx(tx) + if isRelayed { + vmInput.RelayerAddr = relayedTx.RelayerAddr + } + vmInput.GasProvided, err = sc.prepareGasProvided(tx) if err != nil { return err diff --git a/process/smartContract/scQueryService.go b/process/smartContract/scQueryService.go index eb3d9b95e4e..b23c8c9d13d 100644 --- a/process/smartContract/scQueryService.go +++ b/process/smartContract/scQueryService.go @@ -22,53 +22,57 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/smartContract/scrCommon" "github.com/multiversx/mx-chain-go/sharding" + logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/multiversx/mx-chain-vm-common-go/parsers" ) var _ process.SCQueryService = (*SCQueryService)(nil) +var logQueryService = logger.GetOrCreate("process/smartcontract.queryService") + // MaxGasLimitPerQuery - each unit is the equivalent of 1 nanosecond processing time const MaxGasLimitPerQuery = 300_000_000_000 // SCQueryService can execute Get functions over SC to fetch stored values type SCQueryService struct { - vmContainer process.VirtualMachinesContainer - economicsFee process.FeeHandler - mutRunSc sync.Mutex - blockChainHook process.BlockChainHookWithAccountsAdapter - mainBlockChain data.ChainHandler - apiBlockChain data.ChainHandler - numQueries int - gasForQuery uint64 - wasmVMChangeLocker common.Locker - bootstrapper process.Bootstrapper - allowExternalQueriesChan chan struct{} - historyRepository dblookupext.HistoryRepository - shardCoordinator sharding.Coordinator - storageService dataRetriever.StorageService - marshaller marshal.Marshalizer - hasher hashing.Hasher - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + vmContainer process.VirtualMachinesContainer + economicsFee process.FeeHandler + mutRunSc sync.Mutex + blockChainHook process.BlockChainHookWithAccountsAdapter + mainBlockChain data.ChainHandler + apiBlockChain data.ChainHandler + gasForQuery uint64 + wasmVMChangeLocker common.Locker + bootstrapper process.Bootstrapper + allowExternalQueriesChan chan struct{} + 
historyRepository dblookupext.HistoryRepository + shardCoordinator sharding.Coordinator + storageService dataRetriever.StorageService + marshaller marshal.Marshalizer + hasher hashing.Hasher + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + isInHistoricalBalancesMode bool } // ArgsNewSCQueryService defines the arguments needed for the sc query service type ArgsNewSCQueryService struct { - VmContainer process.VirtualMachinesContainer - EconomicsFee process.FeeHandler - BlockChainHook process.BlockChainHookWithAccountsAdapter - MainBlockChain data.ChainHandler - APIBlockChain data.ChainHandler - WasmVMChangeLocker common.Locker - Bootstrapper process.Bootstrapper - AllowExternalQueriesChan chan struct{} - MaxGasLimitPerQuery uint64 - HistoryRepository dblookupext.HistoryRepository - ShardCoordinator sharding.Coordinator - StorageService dataRetriever.StorageService - Marshaller marshal.Marshalizer - Hasher hashing.Hasher - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + VmContainer process.VirtualMachinesContainer + EconomicsFee process.FeeHandler + BlockChainHook process.BlockChainHookWithAccountsAdapter + MainBlockChain data.ChainHandler + APIBlockChain data.ChainHandler + WasmVMChangeLocker common.Locker + Bootstrapper process.Bootstrapper + AllowExternalQueriesChan chan struct{} + MaxGasLimitPerQuery uint64 + HistoryRepository dblookupext.HistoryRepository + ShardCoordinator sharding.Coordinator + StorageService dataRetriever.StorageService + Marshaller marshal.Marshalizer + Hasher hashing.Hasher + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + IsInHistoricalBalancesMode bool } // NewSCQueryService returns a new instance of SCQueryService @@ -85,21 +89,22 @@ func NewSCQueryService( gasForQuery = args.MaxGasLimitPerQuery } return &SCQueryService{ - vmContainer: args.VmContainer, - economicsFee: args.EconomicsFee, - mainBlockChain: args.MainBlockChain, - apiBlockChain: args.APIBlockChain, - blockChainHook: args.BlockChainHook, - wasmVMChangeLocker: args.WasmVMChangeLocker, - bootstrapper: args.Bootstrapper, - gasForQuery: gasForQuery, - allowExternalQueriesChan: args.AllowExternalQueriesChan, - historyRepository: args.HistoryRepository, - shardCoordinator: args.ShardCoordinator, - storageService: args.StorageService, - marshaller: args.Marshaller, - hasher: args.Hasher, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + vmContainer: args.VmContainer, + economicsFee: args.EconomicsFee, + mainBlockChain: args.MainBlockChain, + apiBlockChain: args.APIBlockChain, + blockChainHook: args.BlockChainHook, + wasmVMChangeLocker: args.WasmVMChangeLocker, + bootstrapper: args.Bootstrapper, + gasForQuery: gasForQuery, + allowExternalQueriesChan: args.AllowExternalQueriesChan, + historyRepository: args.HistoryRepository, + shardCoordinator: args.ShardCoordinator, + storageService: args.StorageService, + marshaller: args.Marshaller, + hasher: args.Hasher, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + isInHistoricalBalancesMode: args.IsInHistoricalBalancesMode, }, nil } @@ -179,8 +184,7 @@ func (service *SCQueryService) shouldAllowQueriesExecution() bool { } func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice uint64) (*vmcommon.VMOutput, common.BlockInfo, error) { - log.Trace("executeScCall", "function", query.FuncName, "numQueries", service.numQueries) - service.numQueries++ + logQueryService.Trace("executeScCall", "address", query.ScAddress, "function", query.FuncName, "blockNonce", 
query.BlockNonce.Value, "blockHash", query.BlockHash) shouldEarlyExitBecauseOfSyncState := query.ShouldBeSynced && service.bootstrapper.GetNodeState() == common.NsNotSynchronized if shouldEarlyExitBecauseOfSyncState { @@ -198,11 +202,11 @@ func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice ui return nil, nil, err } - accountsAdapter := service.blockChainHook.GetAccountsAdapter() - err = accountsAdapter.RecreateTrie(blockRootHash) + err = service.recreateTrie(blockRootHash, blockHeader) if err != nil { return nil, nil, err } + service.blockChainHook.SetCurrentHeader(blockHeader) } shouldCheckRootHashChanges := query.SameScState @@ -212,8 +216,6 @@ func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice ui rootHashBeforeExecution = service.apiBlockChain.GetCurrentBlockRootHash() } - service.blockChainHook.SetCurrentHeader(service.mainBlockChain.GetCurrentBlockHeader()) - service.wasmVMChangeLocker.RLock() vm, _, err := scrCommon.FindVMByScAddress(service.vmContainer, query.ScAddress) if err != nil { @@ -229,15 +231,6 @@ func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice ui return nil, nil, err } - if service.hasRetriableExecutionError(vmOutput) { - log.Error("Retriable execution error detected. Will retry (once) executeScCall()", "returnCode", vmOutput.ReturnCode, "returnMessage", vmOutput.ReturnMessage) - - vmOutput, err = vm.RunSmartContractCall(vmInput) - if err != nil { - return nil, nil, err - } - } - if query.SameScState { err = service.checkForRootHashChanges(rootHashBeforeExecution) if err != nil { @@ -258,9 +251,24 @@ func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice ui return vmOutput, blockInfo, nil } +func (service *SCQueryService) recreateTrie(blockRootHash []byte, blockHeader data.HeaderHandler) error { + if check.IfNil(blockHeader) { + return process.ErrNilBlockHeader + } + + accountsAdapter := service.blockChainHook.GetAccountsAdapter() + + rootHashHolder := holders.NewDefaultRootHashesHolder(blockRootHash) + if service.isInHistoricalBalancesMode { + rootHashHolder = holders.NewRootHashHolder(blockRootHash, core.OptionalUint32{Value: blockHeader.GetEpoch(), HasValue: true}) + } + + logQueryService.Trace("calling RecreateTrie", "block", blockHeader.GetNonce(), "rootHashHolder", rootHashHolder) + return accountsAdapter.RecreateTrie(rootHashHolder) +} + // TODO: extract duplicated code with nodeBlocks.go func (service *SCQueryService) extractBlockHeaderAndRootHash(query *process.SCQuery) (data.HeaderHandler, []byte, error) { - if len(query.BlockHash) > 0 { currentHeader, err := service.getBlockHeaderByHash(query.BlockHash) if err != nil { @@ -417,10 +425,6 @@ func (service *SCQueryService) createVMCallInput(query *process.SCQuery, gasPric return vmContractCallInput } -func (service *SCQueryService) hasRetriableExecutionError(vmOutput *vmcommon.VMOutput) bool { - return vmOutput.ReturnMessage == "allocation error" -} - // ComputeScCallGasLimit will estimate how many gas a transaction will consume func (service *SCQueryService) ComputeScCallGasLimit(tx *transaction.Transaction) (uint64, error) { argParser := parsers.NewCallArgsParser() diff --git a/process/smartContract/scQueryServiceDispatcher.go b/process/smartContract/scQueryServiceDispatcher.go index 2c51b47d55d..981f71f3dd9 100644 --- a/process/smartContract/scQueryServiceDispatcher.go +++ b/process/smartContract/scQueryServiceDispatcher.go @@ -78,7 +78,7 @@ func (sqsd *scQueryServiceDispatcher) Close() error { for _, 
scQueryService := range sqsd.list { err := scQueryService.Close() if err != nil { - log.Error("error while closing inner SC query service in scQueryServiceDispatcher.Close", "error", err) + logQueryService.Error("error while closing inner SC query service in scQueryServiceDispatcher.Close", "error", err) errFound = err } } diff --git a/process/smartContract/scQueryService_test.go b/process/smartContract/scQueryService_test.go index 0b76f3a739e..4889dc87ac5 100644 --- a/process/smartContract/scQueryService_test.go +++ b/process/smartContract/scQueryService_test.go @@ -11,7 +11,6 @@ import ( "testing" "time" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" @@ -41,7 +40,7 @@ func createMockArgumentsForSCQuery() ArgsNewSCQueryService { BlockChainHook: &testscommon.BlockChainHookStub{ GetAccountsAdapterCalled: func() state.AccountsAdapter { return &stateMocks.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(options common.RootHashHolder) error { return nil }, } @@ -59,9 +58,10 @@ func createMockArgumentsForSCQuery() ArgsNewSCQueryService { return &storageStubs.StorerStub{}, nil }, }, - Marshaller: &marshallerMock.MarshalizerStub{}, - Hasher: &testscommon.HasherStub{}, - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + Marshaller: &marshallerMock.MarshalizerStub{}, + Hasher: &testscommon.HasherStub{}, + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + IsInHistoricalBalancesMode: false, } } @@ -367,10 +367,11 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { _, _, _ = target.ExecuteQuery(&query) assert.True(t, runWasCalled) }) - t.Run("block hash should work", func(t *testing.T) { + t.Run("block hash should work - in deep history mode", func(t *testing.T) { t.Parallel() runWasCalled := false + epoch := uint32(37) mockVM := &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (output *vmcommon.VMOutput, e error) { @@ -396,6 +397,13 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return uint64(math.MaxUint64) }, } + argsNewSCQuery.MainBlockChain = &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Epoch: epoch, + } + }, + } providedHash := []byte("provided hash") providedRootHash := []byte("provided root hash") argsNewSCQuery.Marshaller = &marshallerMock.MarshalizerMock{} @@ -422,14 +430,21 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return true }, GetEpochByHashCalled: func(hash []byte) (uint32, error) { - return 12, nil + return epoch, nil }, } - wasRecreateTrieCalled := false + + recreateTrieWasCalled := false + recreateTrieFromEpochWasCalled := false + providedAccountsAdapter := &stateMocks.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { - wasRecreateTrieCalled = true - assert.Equal(t, providedRootHash, rootHash) + RecreateTrieCalled: func(options common.RootHashHolder) error { + if options.GetEpoch().HasValue { + recreateTrieFromEpochWasCalled = true + assert.Equal(t, providedRootHash, options.GetRootHash()) + return nil + } + recreateTrieWasCalled = true return nil }, } @@ -438,6 +453,7 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return providedAccountsAdapter }, } + 
argsNewSCQuery.IsInHistoricalBalancesMode = true target, _ := NewSCQueryService(argsNewSCQuery) @@ -452,13 +468,16 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { BlockHash: providedHash, } - _, _, _ = target.ExecuteQuery(&query) + _, _, err := target.ExecuteQuery(&query) assert.True(t, runWasCalled) - assert.True(t, wasRecreateTrieCalled) + assert.True(t, recreateTrieFromEpochWasCalled) + assert.False(t, recreateTrieWasCalled) + assert.Nil(t, err) }) - t.Run("block nonce should work", func(t *testing.T) { + t.Run("block hash should work - in normal mode", func(t *testing.T) { t.Parallel() + epoch := uint32(12) runWasCalled := false mockVM := &mock.VMExecutionHandlerStub{ @@ -487,22 +506,14 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { } providedHash := []byte("provided hash") providedRootHash := []byte("provided root hash") - providedNonce := uint64(123) argsNewSCQuery.Marshaller = &marshallerMock.MarshalizerMock{} - counter := 0 argsNewSCQuery.StorageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { return &storageStubs.StorerStub{ - GetCalled: func(key []byte) ([]byte, error) { - return providedHash, nil - }, GetFromEpochCalled: func(key []byte, epoch uint32) ([]byte, error) { - counter++ - if counter > 2 { - return nil, fmt.Errorf("no scheduled") - } hdr := &block.Header{ RootHash: providedRootHash, + Epoch: epoch, } buff, _ := argsNewSCQuery.Marshaller.Marshal(hdr) return buff, nil @@ -515,15 +526,21 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return true }, GetEpochByHashCalled: func(hash []byte) (uint32, error) { - require.Equal(t, providedHash, hash) - return 12, nil + return epoch, nil }, } - wasRecreateTrieCalled := false + + recreateTrieWasCalled := false + recreateTrieFromEpochWasCalled := false + providedAccountsAdapter := &stateMocks.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { - wasRecreateTrieCalled = true - assert.Equal(t, providedRootHash, rootHash) + RecreateTrieCalled: func(options common.RootHashHolder) error { + if options.GetEpoch().HasValue { + recreateTrieFromEpochWasCalled = true + assert.Equal(t, providedRootHash, options.GetRootHash()) + return nil + } + recreateTrieWasCalled = true return nil }, } @@ -532,6 +549,7 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return providedAccountsAdapter }, } + argsNewSCQuery.IsInHistoricalBalancesMode = false target, _ := NewSCQueryService(argsNewSCQuery) @@ -543,15 +561,123 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { ScAddress: scAddress, FuncName: funcName, Arguments: dataArgs, - BlockNonce: core.OptionalUint64{ - Value: providedNonce, - HasValue: true, - }, + BlockHash: providedHash, } - _, _, _ = target.ExecuteQuery(&query) + _, _, err := target.ExecuteQuery(&query) assert.True(t, runWasCalled) - assert.True(t, wasRecreateTrieCalled) + assert.True(t, recreateTrieWasCalled) + assert.False(t, recreateTrieFromEpochWasCalled) + assert.Nil(t, err) + }) +} + +func TestSCQueryService_RecreateTrie(t *testing.T) { + t.Parallel() + + testRootHash := []byte("test root hash") + t.Run("should not call RecreateTrie if block header is nil", func(t *testing.T) { + t.Parallel() + + argsNewSCQuery := createMockArgumentsForSCQuery() + argsNewSCQuery.BlockChainHook = &testscommon.BlockChainHookStub{ + GetAccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMocks.AccountsStub{ + RecreateTrieCalled: func(rootHash 
common.RootHashHolder) error { + require.Fail(t, "should not be called") + return nil + }, + } + }, + } + + service, _ := NewSCQueryService(argsNewSCQuery) + err := service.recreateTrie(testRootHash, nil) + assert.ErrorIs(t, err, process.ErrNilBlockHeader) + }) + t.Run("should call RecreateTrieFromEpoch if in deep history mode", func(t *testing.T) { + t.Parallel() + + recreateTrieWasCalled := false + recreateTrieFromEpochWasCalled := false + + argsNewSCQuery := createMockArgumentsForSCQuery() + argsNewSCQuery.IsInHistoricalBalancesMode = true + argsNewSCQuery.MainBlockChain = &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return nil // after the genesis we do not have a header as current block + }, + } + argsNewSCQuery.BlockChainHook = &testscommon.BlockChainHookStub{ + GetAccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMocks.AccountsStub{ + RecreateTrieCalled: func(options common.RootHashHolder) error { + if options.GetEpoch().HasValue { + recreateTrieWasCalled = false + recreateTrieFromEpochWasCalled = true + + assert.Equal(t, testRootHash, options.GetRootHash()) + return nil + } + recreateTrieWasCalled = true + recreateTrieFromEpochWasCalled = false + + assert.Equal(t, testRootHash, options.GetRootHash()) + return nil + }, + } + }, + } + + service, _ := NewSCQueryService(argsNewSCQuery) + + // For genesis block, RecreateTrieFromEpoch should be called + err := service.recreateTrie(testRootHash, &block.Header{}) + assert.Nil(t, err) + assert.True(t, recreateTrieFromEpochWasCalled) + assert.False(t, recreateTrieWasCalled) + }) + t.Run("should call RecreateTrie if not in deep history mode", func(t *testing.T) { + t.Parallel() + + recreateTrieWasCalled := false + recreateTrieFromEpochWasCalled := false + + argsNewSCQuery := createMockArgumentsForSCQuery() + argsNewSCQuery.IsInHistoricalBalancesMode = false + argsNewSCQuery.MainBlockChain = &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return nil // after the genesis we do not have a header as current block + }, + } + argsNewSCQuery.BlockChainHook = &testscommon.BlockChainHookStub{ + GetAccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMocks.AccountsStub{ + RecreateTrieCalled: func(options common.RootHashHolder) error { + if options.GetEpoch().HasValue { + recreateTrieWasCalled = false + recreateTrieFromEpochWasCalled = true + + assert.Equal(t, testRootHash, options.GetRootHash()) + return nil + } + recreateTrieWasCalled = true + recreateTrieFromEpochWasCalled = false + + assert.Equal(t, testRootHash, options.GetRootHash()) + return nil + }, + } + }, + } + + service, _ := NewSCQueryService(argsNewSCQuery) + + // For genesis block, RecreateTrieFromEpoch should be called + err := service.recreateTrie(testRootHash, &block.Header{}) + assert.Nil(t, err) + assert.False(t, recreateTrieFromEpochWasCalled) + assert.True(t, recreateTrieWasCalled) }) } @@ -896,16 +1022,6 @@ func TestSCQueryService_ShouldFailIfStateChanged(t *testing.T) { t.Parallel() args := createMockArgumentsForSCQuery() - args.BlockChainHook = &testscommon.BlockChainHookStub{ - GetAccountsAdapterCalled: func() state.AccountsAdapter { - return &stateMocks.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { - return nil - }, - } - }, - } - rootHashCalledCounter := 0 args.APIBlockChain = &testscommon.ChainHandlerStub{ GetCurrentBlockRootHashCalled: func() []byte { @@ -927,7 +1043,7 @@ func TestSCQueryService_ShouldFailIfStateChanged(t 
*testing.T) { FuncName: "function", }) require.Nil(t, res) - require.True(t, errors.Is(err, process.ErrStateChangedWhileExecutingVmQuery)) + require.ErrorIs(t, err, process.ErrStateChangedWhileExecutingVmQuery) } func TestSCQueryService_ShouldWorkIfStateDidntChange(t *testing.T) { diff --git a/process/smartContract/scrCommon/common.go b/process/smartContract/scrCommon/common.go index 957abe5800b..07efc6cfd59 100644 --- a/process/smartContract/scrCommon/common.go +++ b/process/smartContract/scrCommon/common.go @@ -1,6 +1,8 @@ package scrCommon import ( + "math/big" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/hashing" @@ -12,7 +14,6 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "math/big" ) // TestSmartContractProcessor is a SmartContractProcessor used in integration tests @@ -31,29 +32,30 @@ type ExecutableChecker interface { // ArgsNewSmartContractProcessor defines the arguments needed for new smart contract processor type ArgsNewSmartContractProcessor struct { - VmContainer process.VirtualMachinesContainer - ArgsParser process.ArgumentsParser - Hasher hashing.Hasher - Marshalizer marshal.Marshalizer - AccountsDB state.AccountsAdapter - BlockChainHook process.BlockChainHookHandler - BuiltInFunctions vmcommon.BuiltInFunctionContainer - PubkeyConv core.PubkeyConverter - ShardCoordinator sharding.Coordinator - ScrForwarder process.IntermediateTransactionHandler - TxFeeHandler process.TransactionFeeHandler - EconomicsFee process.FeeHandler - TxTypeHandler process.TxTypeHandler - GasHandler process.GasHandler - GasSchedule core.GasScheduleNotifier - TxLogsProcessor process.TransactionLogProcessor - BadTxForwarder process.IntermediateTransactionHandler - EnableRoundsHandler process.EnableRoundsHandler - EnableEpochsHandler common.EnableEpochsHandler - EnableEpochs config.EnableEpochs - VMOutputCacher storage.Cacher - WasmVMChangeLocker common.Locker - IsGenesisProcessing bool + VmContainer process.VirtualMachinesContainer + ArgsParser process.ArgumentsParser + Hasher hashing.Hasher + Marshalizer marshal.Marshalizer + AccountsDB state.AccountsAdapter + BlockChainHook process.BlockChainHookHandler + BuiltInFunctions vmcommon.BuiltInFunctionContainer + PubkeyConv core.PubkeyConverter + ShardCoordinator sharding.Coordinator + ScrForwarder process.IntermediateTransactionHandler + TxFeeHandler process.TransactionFeeHandler + EconomicsFee process.FeeHandler + TxTypeHandler process.TxTypeHandler + GasHandler process.GasHandler + GasSchedule core.GasScheduleNotifier + TxLogsProcessor process.TransactionLogProcessor + FailedTxLogsAccumulator process.FailedTxLogsAccumulator + BadTxForwarder process.IntermediateTransactionHandler + EnableRoundsHandler process.EnableRoundsHandler + EnableEpochsHandler common.EnableEpochsHandler + EnableEpochs config.EnableEpochs + VMOutputCacher storage.Cacher + WasmVMChangeLocker common.Locker + IsGenesisProcessing bool } // FindVMByScAddress is exported for use in all version of scr processors diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index fff94e55389..6d183fbf821 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -1379,7 +1379,7 @@ func TestMetaBootstrap_RollBackIsEmptyCallRollBackOneBlockOkValsShouldWork(t 
*te } args.ForkDetector = createForkDetector(currentHdrNonce, currentHdrHash, remFlags) args.Accounts = &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -1520,7 +1520,7 @@ func TestMetaBootstrap_RollBackIsEmptyCallRollBackOneBlockToGenesisShouldWork(t } args.ForkDetector = createForkDetector(currentHdrNonce, currentHdrHash, remFlags) args.Accounts = &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 8abfd29e6bc..070b926df0f 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -1525,7 +1525,7 @@ func TestBootstrap_RollBackIsEmptyCallRollBackOneBlockOkValsShouldWork(t *testin } args.ForkDetector = createForkDetector(currentHdrNonce, currentHdrHash, remFlags) args.Accounts = &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } @@ -1668,7 +1668,7 @@ func TestBootstrap_RollbackIsEmptyCallRollBackOneBlockToGenesisShouldWork(t *tes } args.ForkDetector = createForkDetector(currentHdrNonce, currentHdrHash, remFlags) args.Accounts = &stateMock.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return nil }, } diff --git a/process/track/errors.go b/process/track/errors.go index 2a0c2e57672..2c9a3a5c297 100644 --- a/process/track/errors.go +++ b/process/track/errors.go @@ -30,3 +30,6 @@ var ErrNotarizedHeaderOffsetIsOutOfBound = errors.New("requested offset of the n // ErrNilRoundHandler signals that a nil roundHandler has been provided var ErrNilRoundHandler = errors.New("nil roundHandler") + +// ErrNilKeysHandler signals that a nil keys handler was provided +var ErrNilKeysHandler = errors.New("nil keys handler") diff --git a/process/track/interface.go b/process/track/interface.go index 7d7966060da..1dbfa2caa2c 100644 --- a/process/track/interface.go +++ b/process/track/interface.go @@ -1,6 +1,7 @@ package track import ( + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" ) @@ -47,3 +48,10 @@ type blockBalancerHandler interface { SetLastShardProcessedMetaNonce(shardID uint32, nonce uint64) IsInterfaceNil() bool } + +// KeysHandler defines the operations implemented by a component that will manage all keys, +// including the single signer keys or the set of multi-keys +type KeysHandler interface { + ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) + IsInterfaceNil() bool +} diff --git a/consensus/spos/sentSignaturesTracker.go b/process/track/sentSignaturesTracker.go similarity index 62% rename from consensus/spos/sentSignaturesTracker.go rename to process/track/sentSignaturesTracker.go index de7ecd69543..515f56a61f6 100644 --- a/consensus/spos/sentSignaturesTracker.go +++ b/process/track/sentSignaturesTracker.go @@ -1,11 +1,10 @@ -package spos +package track import ( "sync" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/consensus" ) // externalPeerID is just a marker so the ResetRoundsWithoutReceivedMessages will know it is not an owned peer ID @@ 
-15,11 +14,11 @@ const externalPeerID = core.PeerID("external peer id") type sentSignaturesTracker struct { mut sync.RWMutex sentFromSelf map[string]struct{} - keysHandler consensus.KeysHandler + keysHandler KeysHandler } // NewSentSignaturesTracker will create a new instance of a tracker able to record if a signature was sent from self -func NewSentSignaturesTracker(keysHandler consensus.KeysHandler) (*sentSignaturesTracker, error) { +func NewSentSignaturesTracker(keysHandler KeysHandler) (*sentSignaturesTracker, error) { if check.IfNil(keysHandler) { return nil, ErrNilKeysHandler } @@ -44,21 +43,18 @@ func (tracker *sentSignaturesTracker) SignatureSent(pkBytes []byte) { tracker.mut.Unlock() } -// ReceivedActualSigners is called whenever a final info is received. If a signer public key did not send a signature -// from the current host, it will call the reset rounds without received message. This is the case when another instance of a -// multikey node (possibly running as main) broadcast only the final info as it contained the leader + a few signers -func (tracker *sentSignaturesTracker) ReceivedActualSigners(signersPks []string) { +// ResetCountersForManagedBlockSigner is called at commit time and will call the reset rounds without received messages +// for the provided key that actually signed a block +func (tracker *sentSignaturesTracker) ResetCountersForManagedBlockSigner(signerPk []byte) { tracker.mut.RLock() defer tracker.mut.RUnlock() - for _, signerPk := range signersPks { - _, isSentFromSelf := tracker.sentFromSelf[signerPk] - if isSentFromSelf { - continue - } - - tracker.keysHandler.ResetRoundsWithoutReceivedMessages([]byte(signerPk), externalPeerID) + _, isSentFromSelf := tracker.sentFromSelf[string(signerPk)] + if isSentFromSelf { + return } + + tracker.keysHandler.ResetRoundsWithoutReceivedMessages(signerPk, externalPeerID) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/consensus/spos/sentSignaturesTracker_test.go b/process/track/sentSignaturesTracker_test.go similarity index 69% rename from consensus/spos/sentSignaturesTracker_test.go rename to process/track/sentSignaturesTracker_test.go index a0ecc275e68..8a60dba37dd 100644 --- a/consensus/spos/sentSignaturesTracker_test.go +++ b/process/track/sentSignaturesTracker_test.go @@ -1,4 +1,4 @@ -package spos +package track import ( "testing" @@ -37,13 +37,11 @@ func TestSentSignaturesTracker_IsInterfaceNil(t *testing.T) { assert.False(t, tracker.IsInterfaceNil()) } -func TestSentSignaturesTracker_ReceivedActualSigners(t *testing.T) { +func TestSentSignaturesTracker_ResetCountersForManagedBlockSigner(t *testing.T) { t.Parallel() - pk1 := "pk1" - pk2 := "pk2" - pk3 := "pk3" - pk4 := "pk4" + pk1 := []byte("pk1") + pk2 := []byte("pk2") t.Run("empty map should call remove", func(t *testing.T) { t.Parallel() @@ -56,13 +54,12 @@ func TestSentSignaturesTracker_ReceivedActualSigners(t *testing.T) { }, } - signers := []string{pk1, pk2} tracker, _ := NewSentSignaturesTracker(keysHandler) - tracker.ReceivedActualSigners(signers) + tracker.ResetCountersForManagedBlockSigner(pk1) - assert.Equal(t, [][]byte{[]byte(pk1), []byte(pk2)}, pkBytesSlice) + assert.Equal(t, [][]byte{pk1}, pkBytesSlice) }) - t.Run("should call remove only for the public keys that did not sent signatures from self", func(t *testing.T) { + t.Run("should call remove only for the public key that did not sent signatures from self", func(t *testing.T) { t.Parallel() pkBytesSlice := make([][]byte, 0) @@ -73,21 +70,21 @@ func 
TestSentSignaturesTracker_ReceivedActualSigners(t *testing.T) { }, } - signers := []string{pk1, pk2, pk3, pk4} tracker, _ := NewSentSignaturesTracker(keysHandler) - tracker.SignatureSent([]byte(pk1)) - tracker.SignatureSent([]byte(pk3)) + tracker.SignatureSent(pk1) - tracker.ReceivedActualSigners(signers) - assert.Equal(t, [][]byte{[]byte("pk2"), []byte("pk4")}, pkBytesSlice) + tracker.ResetCountersForManagedBlockSigner(pk1) + tracker.ResetCountersForManagedBlockSigner(pk2) + assert.Equal(t, [][]byte{pk2}, pkBytesSlice) t.Run("after reset, all should be called", func(t *testing.T) { tracker.StartRound() - tracker.ReceivedActualSigners(signers) + tracker.ResetCountersForManagedBlockSigner(pk1) + tracker.ResetCountersForManagedBlockSigner(pk2) assert.Equal(t, [][]byte{ - []byte("pk2"), []byte("pk4"), // from the previous test - []byte("pk1"), []byte("pk2"), []byte("pk3"), []byte("pk4"), // from this call + pk2, // from the previous test + pk1, pk2, // from this call }, pkBytesSlice) }) }) diff --git a/process/transaction/baseProcess.go b/process/transaction/baseProcess.go index 45e9f2aac13..b1e95a71339 100644 --- a/process/transaction/baseProcess.go +++ b/process/transaction/baseProcess.go @@ -29,6 +29,7 @@ type baseTxProcessor struct { enableEpochsHandler common.EnableEpochsHandler txVersionChecker process.TxVersionCheckerHandler guardianChecker process.GuardianChecker + txTypeHandler process.TxTypeHandler } func (txProc *baseTxProcessor) getAccounts( @@ -145,7 +146,8 @@ func (txProc *baseTxProcessor) checkTxValues( if tx.GasLimit < txProc.economicsFee.ComputeGasLimit(tx) { return process.ErrNotEnoughGasInUserTx } - txFee = txProc.economicsFee.ComputeFeeForProcessing(tx, tx.GasLimit) + + txFee = txProc.computeInnerTxFee(tx) } else { txFee = txProc.economicsFee.ComputeTxFee(tx) } @@ -172,6 +174,29 @@ func (txProc *baseTxProcessor) checkTxValues( return nil } +func (txProc *baseTxProcessor) computeInnerTxFee(tx *transaction.Transaction) *big.Int { + if txProc.enableEpochsHandler.IsFlagEnabled(common.FixRelayedBaseCostFlag) { + return txProc.computeInnerTxFeeAfterBaseCostFix(tx) + } + + return txProc.economicsFee.ComputeFeeForProcessing(tx, tx.GasLimit) +} + +func (txProc *baseTxProcessor) computeInnerTxFeeAfterBaseCostFix(tx *transaction.Transaction) *big.Int { + _, dstShardTxType := txProc.txTypeHandler.ComputeTransactionType(tx) + if dstShardTxType == process.MoveBalance { + return txProc.economicsFee.ComputeMoveBalanceFee(tx) + } + + moveBalanceGasLimit := txProc.economicsFee.ComputeGasLimit(tx) + gasToUse := tx.GetGasLimit() - moveBalanceGasLimit + moveBalanceUserFee := txProc.economicsFee.ComputeMoveBalanceFee(tx) + processingUserFee := txProc.economicsFee.ComputeFeeForProcessing(tx, gasToUse) + txFee := big.NewInt(0).Add(moveBalanceUserFee, processingUserFee) + + return txFee +} + func (txProc *baseTxProcessor) checkUserNames(tx *transaction.Transaction, acntSnd, acntDst state.UserAccountHandler) error { isUserNameWrong := len(tx.SndUserName) > 0 && !check.IfNil(acntSnd) && !bytes.Equal(tx.SndUserName, acntSnd.GetUserName()) diff --git a/process/transaction/baseProcess_test.go b/process/transaction/baseProcess_test.go index 3527748a72e..7795c1a0f6a 100644 --- a/process/transaction/baseProcess_test.go +++ b/process/transaction/baseProcess_test.go @@ -44,6 +44,7 @@ func createMockBaseTxProcessor() *baseTxProcessor { enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag), txVersionChecker: &testscommon.TxVersionCheckerStub{}, 
guardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + txTypeHandler: &testscommon.TxTypeHandlerMock{}, } return &baseProc @@ -212,6 +213,7 @@ func TestBaseTxProcessor_VerifyGuardian(t *testing.T) { enableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag), txVersionChecker: &testscommon.TxVersionCheckerStub{}, guardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + txTypeHandler: &testscommon.TxTypeHandlerMock{}, } notGuardedAccount := &stateMock.UserAccountStub{} diff --git a/process/transaction/export_test.go b/process/transaction/export_test.go index a10b1e2e50c..07ed7a91896 100644 --- a/process/transaction/export_test.go +++ b/process/transaction/export_test.go @@ -13,19 +13,23 @@ import ( type TxProcessor *txProcessor +// GetAccounts calls the un-exported method getAccounts func (txProc *txProcessor) GetAccounts(adrSrc, adrDst []byte, ) (acntSrc, acntDst state.UserAccountHandler, err error) { return txProc.getAccounts(adrSrc, adrDst) } +// CheckTxValues calls the un-exported method checkTxValues func (txProc *txProcessor) CheckTxValues(tx *transaction.Transaction, acntSnd, acntDst state.UserAccountHandler, isUserTxOfRelayed bool) error { return txProc.checkTxValues(tx, acntSnd, acntDst, isUserTxOfRelayed) } +// IncreaseNonce calls IncreaseNonce on the provided account func (txProc *txProcessor) IncreaseNonce(acntSrc state.UserAccountHandler) { acntSrc.IncreaseNonce(1) } +// ProcessTxFee calls the un-exported method processTxFee func (txProc *txProcessor) ProcessTxFee( tx *transaction.Transaction, acntSnd, acntDst state.UserAccountHandler, @@ -35,24 +39,28 @@ func (txProc *txProcessor) ProcessTxFee( return txProc.processTxFee(tx, acntSnd, acntDst, txType, isUserTxOfRelayed) } +// SetWhitelistHandler sets the un-exported field whiteListerVerifiedTxs func (inTx *InterceptedTransaction) SetWhitelistHandler(handler process.WhiteListHandler) { inTx.whiteListerVerifiedTxs = handler } +// IsCrossTxFromMe calls the un-exported method isCrossTxFromMe func (txProc *baseTxProcessor) IsCrossTxFromMe(adrSrc, adrDst []byte) bool { return txProc.isCrossTxFromMe(adrSrc, adrDst) } +// ProcessUserTx calls the un-exported method processUserTx func (txProc *txProcessor) ProcessUserTx( originalTx *transaction.Transaction, userTx *transaction.Transaction, relayedTxValue *big.Int, relayedNonce uint64, - txHash []byte, + originalTxHash []byte, ) (vmcommon.ReturnCode, error) { - return txProc.processUserTx(originalTx, userTx, relayedTxValue, relayedNonce, txHash) + return txProc.processUserTx(originalTx, userTx, relayedTxValue, relayedNonce, originalTxHash) } +// ProcessMoveBalanceCostRelayedUserTx calls the un-exported method processMoveBalanceCostRelayedUserTx func (txProc *txProcessor) ProcessMoveBalanceCostRelayedUserTx( userTx *transaction.Transaction, userScr *smartContractResult.SmartContractResult, @@ -62,6 +70,7 @@ func (txProc *txProcessor) ProcessMoveBalanceCostRelayedUserTx( return txProc.processMoveBalanceCostRelayedUserTx(userTx, userScr, userAcc, originalTxHash) } +// ExecuteFailedRelayedTransaction calls the un-exported method executeFailedRelayedUserTx func (txProc *txProcessor) ExecuteFailedRelayedTransaction( userTx *transaction.Transaction, relayerAdr []byte, @@ -81,20 +90,22 @@ func (txProc *txProcessor) ExecuteFailedRelayedTransaction( errorMsg) } +// CheckMaxGasPrice calls the un-exported method checkMaxGasPrice func (inTx *InterceptedTransaction) CheckMaxGasPrice() error { return inTx.checkMaxGasPrice() } +// VerifyGuardian 
calls the un-exported method verifyGuardian func (txProc *txProcessor) VerifyGuardian(tx *transaction.Transaction, account state.UserAccountHandler) error { return txProc.verifyGuardian(tx, account) } -// ShouldIncreaseNonce - +// ShouldIncreaseNonce calls the un-exported method shouldIncreaseNonce func (txProc *txProcessor) ShouldIncreaseNonce(executionErr error) bool { return txProc.shouldIncreaseNonce(executionErr) } -// AddNonExecutableLog - +// AddNonExecutableLog calls the un-exported method addNonExecutableLog func (txProc *txProcessor) AddNonExecutableLog(executionErr error, originalTxHash []byte, originalTx data.TransactionHandler) error { return txProc.addNonExecutableLog(executionErr, originalTxHash, originalTx) } diff --git a/process/transaction/interceptedTransaction.go b/process/transaction/interceptedTransaction.go index 0aedf837d09..831afdcbcbc 100644 --- a/process/transaction/interceptedTransaction.go +++ b/process/transaction/interceptedTransaction.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" logger "github.com/multiversx/mx-chain-logger-go" @@ -42,6 +43,8 @@ type InterceptedTransaction struct { sndShard uint32 isForCurrentShard bool enableSignedTxWithHash bool + enableEpochsHandler common.EnableEpochsHandler + relayedTxV3Processor process.RelayedTxV3Processor } // NewInterceptedTransaction returns a new instance of InterceptedTransaction @@ -61,6 +64,8 @@ func NewInterceptedTransaction( enableSignedTxWithHash bool, txSignHasher hashing.Hasher, txVersionChecker process.TxVersionCheckerHandler, + enableEpochsHandler common.EnableEpochsHandler, + relayedTxV3Processor process.RelayedTxV3Processor, ) (*InterceptedTransaction, error) { if txBuff == nil { @@ -105,6 +110,12 @@ func NewInterceptedTransaction( if check.IfNil(txVersionChecker) { return nil, process.ErrNilTransactionVersionChecker } + if check.IfNil(enableEpochsHandler) { + return nil, process.ErrNilEnableEpochsHandler + } + if check.IfNil(relayedTxV3Processor) { + return nil, process.ErrNilRelayedTxV3Processor + } tx, err := createTx(protoMarshalizer, txBuff) if err != nil { @@ -127,6 +138,8 @@ func NewInterceptedTransaction( enableSignedTxWithHash: enableSignedTxWithHash, txVersionChecker: txVersionChecker, txSignHasher: txSignHasher, + enableEpochsHandler: enableEpochsHandler, + relayedTxV3Processor: relayedTxV3Processor, } err = inTx.processFields(txBuff) @@ -199,51 +212,102 @@ func (inTx *InterceptedTransaction) CheckValidity() error { return err } + err = inTx.verifyIfRelayedTxV3(inTx.tx) + if err != nil { + return err + } + + if len(inTx.tx.RelayerAddr) > 0 { + return fmt.Errorf("%w, relayer address found on transaction", process.ErrWrongTransaction) + } + inTx.whiteListerVerifiedTxs.Add([][]byte{inTx.Hash()}) } return nil } -func isRelayedTx(funcName string) bool { - return core.RelayedTransaction == funcName || core.RelayedTransactionV2 == funcName -} +func (inTx *InterceptedTransaction) checkRecursiveRelayed(userTxData []byte, innerTxs []*transaction.Transaction) error { + if isRelayedV3(innerTxs) { + return process.ErrRecursiveRelayedTxIsNotAllowed + } -func (inTx *InterceptedTransaction) verifyIfRelayedTxV2(tx 
*transaction.Transaction) error { - funcName, userTxArgs, err := inTx.argsParser.ParseCallData(string(tx.Data)) + funcName, _, err := inTx.argsParser.ParseCallData(string(userTxData)) if err != nil { return nil } - if core.RelayedTransactionV2 != funcName { - return nil + + if isRelayedTx(funcName) { + return process.ErrRecursiveRelayedTxIsNotAllowed } - userTx, err := createRelayedV2(tx, userTxArgs) + return nil +} + +func isRelayedTx(funcName string) bool { + return core.RelayedTransaction == funcName || + core.RelayedTransactionV2 == funcName +} + +func isRelayedV3(innerTxs []*transaction.Transaction) bool { + return len(innerTxs) > 0 +} + +func (inTx *InterceptedTransaction) verifyIfRelayedTxV3(tx *transaction.Transaction) error { + if len(tx.InnerTransactions) == 0 { + return nil + } + if !inTx.enableEpochsHandler.IsFlagEnabled(common.RelayedTransactionsV3Flag) { + return process.ErrRelayedTxV3Disabled + } + err := inTx.relayedTxV3Processor.CheckRelayedTx(tx) if err != nil { return err } - err = inTx.verifySig(userTx) - if err != nil { - return fmt.Errorf("inner transaction: %w", err) + funcName, _, err := inTx.argsParser.ParseCallData(string(tx.Data)) + if err == nil && isRelayedTx(funcName) { + return process.ErrMultipleRelayedTxTypesIsNotAllowed } - err = inTx.VerifyGuardianSig(userTx) - if err != nil { - return fmt.Errorf("inner transaction: %w", err) + return inTx.verifyInnerTransactions(tx) +} + +func (inTx *InterceptedTransaction) verifyInnerTransactions(tx *transaction.Transaction) error { + for _, innerTx := range tx.InnerTransactions { + err := inTx.integrity(innerTx) + if err != nil { + return fmt.Errorf("inner transaction: %w", err) + } + + err = inTx.verifyUserTx(innerTx) + if err != nil { + return fmt.Errorf("inner transaction: %w", err) + } } - funcName, _, err = inTx.argsParser.ParseCallData(string(userTx.Data)) + return nil +} + +func (inTx *InterceptedTransaction) verifyIfRelayedTxV2(tx *transaction.Transaction) error { + funcName, userTxArgs, err := inTx.argsParser.ParseCallData(string(tx.Data)) if err != nil { return nil } + if core.RelayedTransactionV2 != funcName { + return nil + } - // recursive relayed transactions are not allowed - if isRelayedTx(funcName) { - return process.ErrRecursiveRelayedTxIsNotAllowed + if len(tx.InnerTransactions) > 0 { + return process.ErrMultipleRelayedTxTypesIsNotAllowed } - return nil + userTx, err := createRelayedV2(tx, userTxArgs) + if err != nil { + return err + } + + return inTx.verifyUserTx(userTx) } func (inTx *InterceptedTransaction) verifyIfRelayedTx(tx *transaction.Transaction) error { @@ -259,6 +323,10 @@ func (inTx *InterceptedTransaction) verifyIfRelayedTx(tx *transaction.Transactio return process.ErrInvalidArguments } + if len(tx.InnerTransactions) > 0 { + return process.ErrMultipleRelayedTxTypesIsNotAllowed + } + userTx, err := createTx(inTx.signMarshalizer, userTxArgs[0]) if err != nil { return fmt.Errorf("inner transaction: %w", err) @@ -273,28 +341,23 @@ func (inTx *InterceptedTransaction) verifyIfRelayedTx(tx *transaction.Transactio return fmt.Errorf("inner transaction: %w", err) } - err = inTx.verifySig(userTx) + return inTx.verifyUserTx(userTx) +} + +func (inTx *InterceptedTransaction) verifyUserTx(userTx *transaction.Transaction) error { + // recursive relayed transactions are not allowed + err := inTx.checkRecursiveRelayed(userTx.Data, userTx.InnerTransactions) if err != nil { return fmt.Errorf("inner transaction: %w", err) } - - err = inTx.VerifyGuardianSig(userTx) + err = inTx.verifySig(userTx) if err != nil 
{ return fmt.Errorf("inner transaction: %w", err) } - if len(userTx.Data) == 0 { - return nil - } - - funcName, _, err = inTx.argsParser.ParseCallData(string(userTx.Data)) + err = inTx.VerifyGuardianSig(userTx) if err != nil { - return nil - } - - // recursive relayed transactions are not allowed - if isRelayedTx(funcName) { - return process.ErrRecursiveRelayedTxIsNotAllowed + return fmt.Errorf("inner transaction: %w", err) } return nil @@ -474,6 +537,11 @@ func (inTx *InterceptedTransaction) Fee() *big.Int { return inTx.feeHandler.ComputeTxFee(inTx.tx) } +// RelayerAddress returns the relayer address from transaction +func (inTx *InterceptedTransaction) RelayerAddress() []byte { + return inTx.tx.RelayerAddr +} + // Type returns the type of this intercepted data func (inTx *InterceptedTransaction) Type() string { return "intercepted tx" diff --git a/process/transaction/interceptedTransaction_test.go b/process/transaction/interceptedTransaction_test.go index b2aa2e81526..44d416194ab 100644 --- a/process/transaction/interceptedTransaction_test.go +++ b/process/transaction/interceptedTransaction_test.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "math/big" + "strings" "testing" "github.com/multiversx/mx-chain-core-go/core" @@ -14,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" dataTransaction "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/interceptors" "github.com/multiversx/mx-chain-go/process/mock" @@ -21,8 +23,10 @@ import ( "github.com/multiversx/mx-chain-go/process/transaction" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -33,8 +37,10 @@ var errSignerMockVerifySigFails = errors.New("errSignerMockVerifySigFails") var senderShard = uint32(2) var recvShard = uint32(3) +var relayerShard = senderShard var senderAddress = []byte("12345678901234567890123456789012") var recvAddress = []byte("23456789012345678901234567890123") +var relayerAddress = []byte("34567890123456789012345678901234") var sigBad = []byte("bad-signature") var sigOk = []byte("signature") @@ -89,6 +95,9 @@ func createInterceptedTxWithTxFeeHandlerAndVersionChecker(tx *dataTransaction.Tr if bytes.Equal(address, recvAddress) { return recvShard } + if bytes.Equal(address, relayerAddress) { + return relayerShard + } return shardCoordinator.CurrentShard } @@ -108,11 +117,13 @@ func createInterceptedTxWithTxFeeHandlerAndVersionChecker(tx *dataTransaction.Tr shardCoordinator, txFeeHandler, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("T"), false, &hashingMocks.HasherMock{}, txVerChecker, + 
&enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) } @@ -132,6 +143,9 @@ func createInterceptedTxFromPlainTx(tx *dataTransaction.Transaction, txFeeHandle if bytes.Equal(address, recvAddress) { return recvShard } + if bytes.Equal(address, relayerAddress) { + return relayerShard + } return shardCoordinator.CurrentShard } @@ -151,11 +165,13 @@ func createInterceptedTxFromPlainTx(tx *dataTransaction.Transaction, txFeeHandle shardCoordinator, txFeeHandler, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, chainID, false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(minTxVersion), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) } @@ -175,10 +191,23 @@ func createInterceptedTxFromPlainTxWithArgParser(tx *dataTransaction.Transaction if bytes.Equal(address, recvAddress) { return recvShard } + if bytes.Equal(address, relayerAddress) { + return relayerShard + } return shardCoordinator.CurrentShard } + txFeeHandler := createFreeTxFeeHandler() + relayedTxV3Processor, err := transaction.NewRelayedTxV3Processor(transaction.ArgRelayedTxV3Processor{ + EconomicsFee: txFeeHandler, + ShardCoordinator: shardCoordinator, + MaxTransactionsAllowed: 10, + }) + if err != nil { + return nil, err + } + return transaction.NewInterceptedTransaction( txBuff, marshalizer, @@ -192,13 +221,15 @@ func createInterceptedTxFromPlainTxWithArgParser(tx *dataTransaction.Transaction }, }, shardCoordinator, - createFreeTxFeeHandler(), + txFeeHandler, &testscommon.WhiteListHandlerStub{}, smartContract.NewArgumentParser(), tx.ChainID, false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(tx.Version), + enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedTransactionsV3Flag), + relayedTxV3Processor, ) } @@ -218,11 +249,13 @@ func TestNewInterceptedTransaction_NilBufferShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("chainID"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -248,6 +281,8 @@ func TestNewInterceptedTransaction_NilArgsParser(t *testing.T) { false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -268,11 +303,13 @@ func TestNewInterceptedTransaction_NilVersionChecker(t *testing.T) { mock.NewOneShardCoordinatorMock(), &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("chainID"), false, &hashingMocks.HasherMock{}, nil, + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -293,11 +330,13 @@ func TestNewInterceptedTransaction_NilMarshalizerShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("chainID"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) 
assert.Nil(t, txi) @@ -318,11 +357,13 @@ func TestNewInterceptedTransaction_NilSignMarshalizerShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("chainID"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -343,11 +384,13 @@ func TestNewInterceptedTransaction_NilHasherShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("chainID"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -368,11 +411,13 @@ func TestNewInterceptedTransaction_NilKeyGenShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("chainID"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -393,11 +438,13 @@ func TestNewInterceptedTransaction_NilSignerShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("chainID"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -418,11 +465,13 @@ func TestNewInterceptedTransaction_NilPubkeyConverterShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("chainID"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -443,11 +492,13 @@ func TestNewInterceptedTransaction_NilCoordinatorShouldErr(t *testing.T) { nil, &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("chainID"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -468,11 +519,13 @@ func TestNewInterceptedTransaction_NilFeeHandlerShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), nil, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("chainID"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -493,11 +546,13 @@ func TestNewInterceptedTransaction_NilWhiteListerVerifiedTxsShouldErr(t *testing mock.NewOneShardCoordinatorMock(), &economicsmocks.EconomicsHandlerStub{}, nil, - &mock.ArgumentParserMock{}, + 
&testscommon.ArgumentParserMock{}, []byte("chainID"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -518,11 +573,13 @@ func TestNewInterceptedTransaction_InvalidChainIDShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, nil, false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -543,17 +600,73 @@ func TestNewInterceptedTransaction_NilTxSignHasherShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("chainID"), false, nil, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) assert.Equal(t, process.ErrNilHasher, err) } +func TestNewInterceptedTransaction_NilEnableEpochsHandlerShouldErr(t *testing.T) { + t.Parallel() + + txi, err := transaction.NewInterceptedTransaction( + make([]byte, 0), + &mock.MarshalizerMock{}, + &mock.MarshalizerMock{}, + &hashingMocks.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + createMockPubKeyConverter(), + mock.NewOneShardCoordinatorMock(), + &economicsmocks.EconomicsHandlerStub{}, + &testscommon.WhiteListHandlerStub{}, + &testscommon.ArgumentParserMock{}, + []byte("chainID"), + false, + &hashingMocks.HasherMock{}, + versioning.NewTxVersionChecker(1), + nil, + &processMocks.RelayedTxV3ProcessorMock{}, + ) + + assert.Nil(t, txi) + assert.Equal(t, process.ErrNilEnableEpochsHandler, err) +} + +func TestNewInterceptedTransaction_NilRelayedV3ProcessorShouldErr(t *testing.T) { + t.Parallel() + + txi, err := transaction.NewInterceptedTransaction( + make([]byte, 0), + &mock.MarshalizerMock{}, + &mock.MarshalizerMock{}, + &hashingMocks.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + createMockPubKeyConverter(), + mock.NewOneShardCoordinatorMock(), + &economicsmocks.EconomicsHandlerStub{}, + &testscommon.WhiteListHandlerStub{}, + &testscommon.ArgumentParserMock{}, + []byte("chainID"), + false, + &hashingMocks.HasherMock{}, + versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + nil, + ) + + assert.Nil(t, txi) + assert.Equal(t, process.ErrNilRelayedTxV3Processor, err) +} + func TestNewInterceptedTransaction_UnmarshalingTxFailsShouldErr(t *testing.T) { t.Parallel() @@ -574,11 +687,13 @@ func TestNewInterceptedTransaction_UnmarshalingTxFailsShouldErr(t *testing.T) { mock.NewOneShardCoordinatorMock(), &economicsmocks.EconomicsHandlerStub{}, &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("chainID"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(1), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, txi) @@ -995,6 +1110,32 @@ func TestInterceptedTransaction_CheckValidityOkValsShouldWork(t *testing.T) { assert.Nil(t, err) } +func TestInterceptedTransaction_CheckValidityRelayerAddressShouldError(t *testing.T) { + t.Parallel() + + minTxVersion := uint32(1) + chainID := 
[]byte("chain") + tx := &dataTransaction.Transaction{ + Nonce: 1, + Value: big.NewInt(2), + Data: []byte("data"), + GasLimit: 3, + GasPrice: 4, + RcvAddr: recvAddress, + SndAddr: senderAddress, + Signature: sigOk, + ChainID: chainID, + Version: minTxVersion, + RelayerAddr: []byte("45678901234567890123456789012345"), + } + txi, _ := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler(), chainID, minTxVersion) + + err := txi.CheckValidity() + + assert.True(t, errors.Is(err, process.ErrWrongTransaction)) + assert.True(t, strings.Contains(err.Error(), "relayer address found on transaction")) +} + func TestInterceptedTransaction_CheckValiditySignedWithHashButNotEnabled(t *testing.T) { t.Parallel() @@ -1044,11 +1185,13 @@ func TestInterceptedTransaction_CheckValiditySignedWithHashButNotEnabled(t *test shardCoordinator, createFreeTxFeeHandler(), &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, chainID, false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(minTxVersion), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) err := txi.CheckValidity() @@ -1104,11 +1247,13 @@ func TestInterceptedTransaction_CheckValiditySignedWithHashShouldWork(t *testing shardCoordinator, createFreeTxFeeHandler(), &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, chainID, true, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(minTxVersion), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) err := txi.CheckValidity() @@ -1189,11 +1334,13 @@ func TestInterceptedTransaction_ScTxDeployRecvShardIdShouldBeSendersShardId(t *t shardCoordinator, createFreeTxFeeHandler(), &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, chainID, false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(minTxVersion), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Nil(t, err) @@ -1274,6 +1421,31 @@ func TestInterceptedTransaction_GetSenderAddress(t *testing.T) { assert.NotNil(t, result) } +func TestInterceptedTransaction_GetRelayerAddress(t *testing.T) { + t.Parallel() + + relayerAddr := []byte("34567890123456789012345678901234") + minTxVersion := uint32(1) + chainID := []byte("chain") + tx := &dataTransaction.Transaction{ + Nonce: 0, + Value: big.NewInt(2), + Data: []byte("data"), + GasLimit: 3, + GasPrice: 4, + RcvAddr: recvAddress, + SndAddr: senderAddress, + Signature: sigOk, + ChainID: chainID, + Version: minTxVersion, + RelayerAddr: relayerAddr, + } + + txi, _ := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler(), chainID, minTxVersion) + result := txi.RelayerAddress() + assert.Equal(t, relayerAddr, result) +} + func TestInterceptedTransaction_CheckValiditySecondTimeDoesNotVerifySig(t *testing.T) { t.Parallel() @@ -1328,11 +1500,13 @@ func TestInterceptedTransaction_CheckValiditySecondTimeDoesNotVerifySig(t *testi shardCoordinator, createFreeTxFeeHandler(), whiteListerVerifiedTxs, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, chainID, false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(minTxVersion), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) require.Nil(t, err) @@ -1426,7 +1600,16 @@ func TestInterceptedTransaction_CheckValidityOfRelayedTx(t *testing.T) { tx.Data = 
[]byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxData)) txi, _ = createInterceptedTxFromPlainTxWithArgParser(tx) err = txi.CheckValidity() - assert.Equal(t, process.ErrRecursiveRelayedTxIsNotAllowed, err) + assert.True(t, strings.Contains(err.Error(), process.ErrRecursiveRelayedTxIsNotAllowed.Error())) + assert.Contains(t, err.Error(), "inner transaction") + + userTx.Data = []byte("") + userTxData, _ = marshalizer.Marshal(userTx) + tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxData)) + tx.InnerTransactions = []*dataTransaction.Transaction{{Nonce: 100}} + txi, _ = createInterceptedTxFromPlainTxWithArgParser(tx) + err = txi.CheckValidity() + assert.True(t, strings.Contains(err.Error(), process.ErrMultipleRelayedTxTypesIsNotAllowed.Error())) } func TestInterceptedTransaction_CheckValidityOfRelayedTxV2(t *testing.T) { @@ -1487,8 +1670,19 @@ func TestInterceptedTransaction_CheckValidityOfRelayedTxV2(t *testing.T) { tx.Data = []byte(core.RelayedTransactionV2 + "@" + hex.EncodeToString(userTx.RcvAddr) + "@" + hex.EncodeToString(big.NewInt(0).SetUint64(userTx.Nonce).Bytes()) + "@" + hex.EncodeToString([]byte(core.RelayedTransaction)) + "@" + hex.EncodeToString(userTx.Signature)) txi, _ = createInterceptedTxFromPlainTxWithArgParser(tx) err = txi.CheckValidity() - assert.Equal(t, process.ErrRecursiveRelayedTxIsNotAllowed, err) + assert.True(t, strings.Contains(err.Error(), process.ErrRecursiveRelayedTxIsNotAllowed.Error())) + assert.Contains(t, err.Error(), "inner transaction") + + userTx.Data = []byte("") + marshalizer := &mock.MarshalizerMock{} + userTxData, _ := marshalizer.Marshal(userTx) + tx.Data = []byte(core.RelayedTransactionV2 + "@" + hex.EncodeToString(userTxData)) + tx.InnerTransactions = []*dataTransaction.Transaction{{Nonce: 100}} + txi, _ = createInterceptedTxFromPlainTxWithArgParser(tx) + err = txi.CheckValidity() + assert.True(t, strings.Contains(err.Error(), process.ErrMultipleRelayedTxTypesIsNotAllowed.Error())) + tx.InnerTransactions = nil userTx.Signature = sigOk userTx.SndAddr = []byte("otherAddress") tx.Data = []byte(core.RelayedTransactionV2 + "@" + hex.EncodeToString(userTx.RcvAddr) + "@" + hex.EncodeToString(big.NewInt(0).SetUint64(userTx.Nonce).Bytes()) + "@" + hex.EncodeToString(userTx.Data) + "@" + hex.EncodeToString(userTx.Signature)) @@ -1497,6 +1691,177 @@ func TestInterceptedTransaction_CheckValidityOfRelayedTxV2(t *testing.T) { assert.Nil(t, err) } +func TestInterceptedTransaction_CheckValidityOfRelayedTxV3(t *testing.T) { + t.Parallel() + + minTxVersion := uint32(1) + chainID := []byte("chain") + innerTx := &dataTransaction.Transaction{ + Nonce: 1, + Value: big.NewInt(2), + Data: []byte("data inner tx 1"), + GasLimit: 3, + GasPrice: 4, + RcvAddr: recvAddress, + SndAddr: senderAddress, + Signature: sigOk, + ChainID: chainID, + Version: minTxVersion, + RelayerAddr: relayerAddress, + } + + tx := &dataTransaction.Transaction{ + Nonce: 1, + Value: big.NewInt(0), + GasLimit: 10, + GasPrice: 4, + RcvAddr: relayerAddress, + SndAddr: relayerAddress, + Signature: sigOk, + ChainID: chainID, + Version: minTxVersion, + InnerTransactions: []*dataTransaction.Transaction{innerTx}, + } + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + txi, _ := createInterceptedTxFromPlainTxWithArgParser(&txCopy) + err := txi.CheckValidity() + assert.Nil(t, err) + }) + t.Run("inner txs on inner tx should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + innerTxCopy := *innerTx + innerTxCopy.InnerTransactions = 
[]*dataTransaction.Transaction{{}} + txCopy.InnerTransactions = []*dataTransaction.Transaction{&innerTxCopy} + + txi, _ := createInterceptedTxFromPlainTxWithArgParser(&txCopy) + err := txi.CheckValidity() + assert.Equal(t, process.ErrRecursiveRelayedTxIsNotAllowed, err) + }) + t.Run("different relayer on inner tx should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + innerTxCopy := *innerTx + innerTxCopy.RelayerAddr = recvAddress + txCopy.InnerTransactions = []*dataTransaction.Transaction{&innerTxCopy} + + txi, _ := createInterceptedTxFromPlainTxWithArgParser(&txCopy) + err := txi.CheckValidity() + assert.Equal(t, process.ErrRelayedTxV3RelayerMismatch, err) + }) + t.Run("different sender than receiver should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + innerTxCopy := *innerTx + txCopy.RcvAddr = recvAddress + txCopy.InnerTransactions = []*dataTransaction.Transaction{&innerTxCopy} + txi, _ := createInterceptedTxFromPlainTxWithArgParser(&txCopy) + err := txi.CheckValidity() + assert.Equal(t, process.ErrRelayedTxV3SenderDoesNotMatchReceiver, err) + }) + t.Run("empty signature on inner tx should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + innerTxCopy := *innerTx + innerTxCopy.Signature = nil + txCopy.InnerTransactions = []*dataTransaction.Transaction{&innerTxCopy} + txi, _ := createInterceptedTxFromPlainTxWithArgParser(&txCopy) + err := txi.CheckValidity() + assert.NotNil(t, err) + }) + t.Run("bad signature on inner tx should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + innerTxCopy := *innerTx + innerTxCopy.Signature = sigBad + txCopy.InnerTransactions = []*dataTransaction.Transaction{&innerTxCopy} + txi, _ := createInterceptedTxFromPlainTxWithArgParser(&txCopy) + err := txi.CheckValidity() + assert.NotNil(t, err) + }) + t.Run("inner tx on inner tx(recursive) should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + innerTxCopy := *innerTx + txCopy.InnerTransactions = []*dataTransaction.Transaction{&innerTxCopy} + innerTx2 := &dataTransaction.Transaction{ + Nonce: 2, + Value: big.NewInt(3), + Data: []byte(""), + GasLimit: 3, + GasPrice: 4, + RcvAddr: recvAddress, + SndAddr: senderAddress, + Signature: sigOk, + ChainID: chainID, + Version: minTxVersion, + } + innerTxCopy.InnerTransactions = []*dataTransaction.Transaction{innerTx2} + txi, _ := createInterceptedTxFromPlainTxWithArgParser(&txCopy) + err := txi.CheckValidity() + assert.NotNil(t, err) + }) + t.Run("relayed v3 not enabled yet should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + innerTxCopy := *innerTx + txCopy.InnerTransactions = []*dataTransaction.Transaction{&innerTxCopy} + marshalizer := &mock.MarshalizerMock{} + txBuff, _ := marshalizer.Marshal(&txCopy) + txi, _ := transaction.NewInterceptedTransaction( + txBuff, + marshalizer, + marshalizer, + &hashingMocks.HasherMock{}, + createKeyGenMock(), + createDummySigner(), + &testscommon.PubkeyConverterStub{ + LenCalled: func() int { + return 32 + }, + }, + mock.NewMultipleShardsCoordinatorMock(), + createFreeTxFeeHandler(), + &testscommon.WhiteListHandlerStub{}, + &testscommon.ArgumentParserMock{}, + txCopy.ChainID, + false, + &hashingMocks.HasherMock{}, + versioning.NewTxVersionChecker(0), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, + ) + + assert.NotNil(t, txi) + err := txi.CheckValidity() + assert.Equal(t, process.ErrRelayedTxV3Disabled, err) + }) + t.Run("inner txs + relayed v2 should error", func(t *testing.T) { + 
t.Parallel() + + txCopy := *tx + innerTxCopy := *innerTx + txCopy.InnerTransactions = []*dataTransaction.Transaction{&innerTxCopy} + marshaller := &marshallerMock.MarshalizerMock{} + userTxData, _ := marshaller.Marshal(innerTxCopy) + txCopy.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxData)) + txi, _ := createInterceptedTxFromPlainTxWithArgParser(&txCopy) + err := txi.CheckValidity() + assert.Equal(t, process.ErrMultipleRelayedTxTypesIsNotAllowed, err) + }) +} + // ------- IsInterfaceNil func TestInterceptedTransaction_IsInterfaceNil(t *testing.T) { t.Parallel() @@ -1621,11 +1986,13 @@ func TestInterceptedTransaction_Fee(t *testing.T) { shardCoordinator, createFreeTxFeeHandler(), &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("T"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(0), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) assert.Equal(t, big.NewInt(0), txin.Fee()) @@ -1664,11 +2031,13 @@ func TestInterceptedTransaction_String(t *testing.T) { shardCoordinator, createFreeTxFeeHandler(), &testscommon.WhiteListHandlerStub{}, - &mock.ArgumentParserMock{}, + &testscommon.ArgumentParserMock{}, []byte("T"), false, &hashingMocks.HasherMock{}, versioning.NewTxVersionChecker(0), + &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + &processMocks.RelayedTxV3ProcessorMock{}, ) expectedFormat := fmt.Sprintf( diff --git a/process/transaction/metaProcess.go b/process/transaction/metaProcess.go index 83274dda551..13d6fd4715b 100644 --- a/process/transaction/metaProcess.go +++ b/process/transaction/metaProcess.go @@ -20,7 +20,6 @@ var _ process.TransactionProcessor = (*metaTxProcessor)(nil) // txProcessor implements TransactionProcessor interface and can modify account states according to a transaction type metaTxProcessor struct { *baseTxProcessor - txTypeHandler process.TxTypeHandler enableEpochsHandler common.EnableEpochsHandler } @@ -65,8 +64,8 @@ func NewMetaTxProcessor(args ArgsNewMetaTxProcessor) (*metaTxProcessor, error) { } err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.PenalizedTooMuchGasFlag, - common.BuiltInFunctionOnMetaFlag, common.ESDTFlag, + common.FixRelayedBaseCostFlag, }) if err != nil { return nil, err @@ -89,11 +88,11 @@ func NewMetaTxProcessor(args ArgsNewMetaTxProcessor) (*metaTxProcessor, error) { enableEpochsHandler: args.EnableEpochsHandler, txVersionChecker: args.TxVersionChecker, guardianChecker: args.GuardianChecker, + txTypeHandler: args.TxTypeHandler, } txProc := &metaTxProcessor{ baseTxProcessor: baseTxProcess, - txTypeHandler: args.TxTypeHandler, enableEpochsHandler: args.EnableEpochsHandler, } @@ -143,10 +142,6 @@ func (txProc *metaTxProcessor) ProcessTransaction(tx *transaction.Transaction) ( case process.SCInvoking: return txProc.processSCInvoking(tx, tx.SndAddr, tx.RcvAddr) case process.BuiltInFunctionCall: - if txProc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag) { - return txProc.processBuiltInFunctionCall(tx, tx.SndAddr, tx.RcvAddr) - } - if txProc.enableEpochsHandler.IsFlagEnabled(common.ESDTFlag) { return txProc.processSCInvoking(tx, tx.SndAddr, tx.RcvAddr) } @@ -189,18 +184,6 @@ func (txProc *metaTxProcessor) processSCInvoking( return txProc.scProcessor.ExecuteSmartContractTransaction(tx, acntSrc, acntDst) } -func (txProc *metaTxProcessor) processBuiltInFunctionCall( - tx *transaction.Transaction, - adrSrc, adrDst []byte, -) 
(vmcommon.ReturnCode, error) { - acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) - if err != nil { - return 0, err - } - - return txProc.scProcessor.ExecuteBuiltInFunction(tx, acntSrc, acntDst) -} - // IsInterfaceNil returns true if there is no value under the interface func (txProc *metaTxProcessor) IsInterfaceNil() bool { return txProc == nil diff --git a/process/transaction/metaProcess_test.go b/process/transaction/metaProcess_test.go index ac536af4e30..eaaa1382d2e 100644 --- a/process/transaction/metaProcess_test.go +++ b/process/transaction/metaProcess_test.go @@ -451,19 +451,6 @@ func TestMetaTxProcessor_ProcessTransactionBuiltInCallTxShouldWork(t *testing.T) assert.Nil(t, err) assert.True(t, wasCalled) assert.Equal(t, 0, saveAccountCalled) - - builtInCalled := false - scProcessorMock.ExecuteBuiltInFunctionCalled = func(tx data.TransactionHandler, acntSrc, acntDst state.UserAccountHandler) (vmcommon.ReturnCode, error) { - builtInCalled = true - return 0, nil - } - - enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionOnMetaFlag) - - _, err = txProc.ProcessTransaction(&tx) - assert.Nil(t, err) - assert.True(t, builtInCalled) - assert.Equal(t, 0, saveAccountCalled) } func TestMetaTxProcessor_ProcessTransactionWithInvalidUsernameShouldNotError(t *testing.T) { diff --git a/process/transaction/relayedTxV3Processor.go b/process/transaction/relayedTxV3Processor.go new file mode 100644 index 00000000000..1c25ad46214 --- /dev/null +++ b/process/transaction/relayedTxV3Processor.go @@ -0,0 +1,112 @@ +package transaction + +import ( + "bytes" + "fmt" + "math/big" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" +) + +const minTransactionsAllowed = 1 + +// ArgRelayedTxV3Processor is the DTO used to create a new instance of relayedTxV3Processor +type ArgRelayedTxV3Processor struct { + EconomicsFee process.FeeHandler + ShardCoordinator sharding.Coordinator + MaxTransactionsAllowed int +} + +type relayedTxV3Processor struct { + economicsFee process.FeeHandler + shardCoordinator sharding.Coordinator + maxTransactionsAllowed int +} + +// NewRelayedTxV3Processor returns a new instance of relayedTxV3Processor +func NewRelayedTxV3Processor(args ArgRelayedTxV3Processor) (*relayedTxV3Processor, error) { + err := checkArgs(args) + if err != nil { + return nil, err + } + return &relayedTxV3Processor{ + economicsFee: args.EconomicsFee, + shardCoordinator: args.ShardCoordinator, + maxTransactionsAllowed: args.MaxTransactionsAllowed, + }, nil +} + +func checkArgs(args ArgRelayedTxV3Processor) error { + if check.IfNil(args.EconomicsFee) { + return process.ErrNilEconomicsFeeHandler + } + if check.IfNil(args.ShardCoordinator) { + return process.ErrNilShardCoordinator + } + if args.MaxTransactionsAllowed < minTransactionsAllowed { + return fmt.Errorf("%w for MaxTransactionsAllowed, provided %d, min expected %d", process.ErrInvalidValue, args.MaxTransactionsAllowed, minTransactionsAllowed) + } + + return nil +} + +// CheckRelayedTx checks the relayed transaction and its inner transactions +func (proc *relayedTxV3Processor) CheckRelayedTx(tx *transaction.Transaction) error { + if len(tx.InnerTransactions) > proc.maxTransactionsAllowed { + return process.ErrRelayedTxV3TooManyInnerTransactions + } + if tx.GetValue().Cmp(big.NewInt(0)) != 0 { + return 
process.ErrRelayedTxV3ZeroVal + } + if !bytes.Equal(tx.RcvAddr, tx.SndAddr) { + return process.ErrRelayedTxV3SenderDoesNotMatchReceiver + } + if tx.GasLimit < proc.computeRelayedTxMinGasLimit(tx) { + return process.ErrRelayedTxV3GasLimitMismatch + } + if len(tx.Data) > 0 { + return process.ErrRelayedTxV3InvalidDataField + } + + innerTxs := tx.InnerTransactions + for _, innerTx := range innerTxs { + if !bytes.Equal(innerTx.RelayerAddr, tx.SndAddr) { + return process.ErrRelayedTxV3RelayerMismatch + } + if tx.GasPrice != innerTx.GasPrice { + return process.ErrRelayedV3GasPriceMismatch + } + if len(innerTx.InnerTransactions) > 0 { + return process.ErrRecursiveRelayedTxIsNotAllowed + } + + senderShard := proc.shardCoordinator.ComputeId(innerTx.SndAddr) + relayerShard := proc.shardCoordinator.ComputeId(innerTx.RelayerAddr) + if senderShard != relayerShard { + return process.ErrRelayedTxV3SenderShardMismatch + } + } + + return nil +} + +func (proc *relayedTxV3Processor) computeRelayedTxMinGasLimit(tx *transaction.Transaction) uint64 { + relayedTxGasLimit := proc.economicsFee.ComputeGasLimit(tx) + relayedTxMinGasLimit := proc.economicsFee.MinGasLimit() + relayedTxGasLimitDiff := relayedTxGasLimit - relayedTxMinGasLimit // this may be positive if the relayed tx is guarded + + totalGasLimit := relayedTxGasLimitDiff + relayedTxMinGasLimit*uint64(len(tx.InnerTransactions)) + for _, innerTx := range tx.InnerTransactions { + totalGasLimit += innerTx.GasLimit + } + + return totalGasLimit +} + +// IsInterfaceNil returns true if there is no value under the interface +func (proc *relayedTxV3Processor) IsInterfaceNil() bool { + return proc == nil +} diff --git a/process/transaction/relayedTxV3Processor_test.go b/process/transaction/relayedTxV3Processor_test.go new file mode 100644 index 00000000000..7f6495ebd92 --- /dev/null +++ b/process/transaction/relayedTxV3Processor_test.go @@ -0,0 +1,249 @@ +package transaction_test + +import ( + "bytes" + "errors" + "math/big" + "strings" + "testing" + + "github.com/multiversx/mx-chain-core-go/data" + coreTransaction "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/transaction" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/stretchr/testify/require" +) + +const minGasLimit = uint64(1) + +func getDefaultTx() *coreTransaction.Transaction { + return &coreTransaction.Transaction{ + Nonce: 0, + Value: big.NewInt(0), + RcvAddr: []byte("rel"), + SndAddr: []byte("rel"), + GasPrice: 1, + GasLimit: minGasLimit * 4, + InnerTransactions: []*coreTransaction.Transaction{ + { + Nonce: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcv1"), + SndAddr: []byte("snd1"), + GasPrice: 1, + GasLimit: minGasLimit, + RelayerAddr: []byte("rel"), + }, + { + Nonce: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcv1"), + SndAddr: []byte("snd2"), + GasPrice: 1, + GasLimit: minGasLimit, + RelayerAddr: []byte("rel"), + }, + }, + } +} + +func createMockArgRelayedTxV3Processor() transaction.ArgRelayedTxV3Processor { + return transaction.ArgRelayedTxV3Processor{ + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + ShardCoordinator: &testscommon.ShardsCoordinatorMock{}, + MaxTransactionsAllowed: 10, + } +} + +func TestNewRelayedTxV3Processor(t *testing.T) { + t.Parallel() + + t.Run("nil economics fee 
should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgRelayedTxV3Processor() + args.EconomicsFee = nil + proc, err := transaction.NewRelayedTxV3Processor(args) + require.Nil(t, proc) + require.Equal(t, process.ErrNilEconomicsFeeHandler, err) + }) + t.Run("nil shard coordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgRelayedTxV3Processor() + args.ShardCoordinator = nil + proc, err := transaction.NewRelayedTxV3Processor(args) + require.Nil(t, proc) + require.Equal(t, process.ErrNilShardCoordinator, err) + }) + t.Run("invalid max transactions allowed should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgRelayedTxV3Processor() + args.MaxTransactionsAllowed = 0 + proc, err := transaction.NewRelayedTxV3Processor(args) + require.Nil(t, proc) + require.True(t, errors.Is(err, process.ErrInvalidValue)) + require.True(t, strings.Contains(err.Error(), "MaxTransactionsAllowed")) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + proc, err := transaction.NewRelayedTxV3Processor(createMockArgRelayedTxV3Processor()) + require.NoError(t, err) + require.NotNil(t, proc) + }) +} + +func TestRelayedTxV3Processor_IsInterfaceNil(t *testing.T) { + t.Parallel() + + args := createMockArgRelayedTxV3Processor() + args.EconomicsFee = nil + proc, _ := transaction.NewRelayedTxV3Processor(args) + require.True(t, proc.IsInterfaceNil()) + + proc, _ = transaction.NewRelayedTxV3Processor(createMockArgRelayedTxV3Processor()) + require.False(t, proc.IsInterfaceNil()) +} + +func TestRelayedTxV3Processor_CheckRelayedTx(t *testing.T) { + t.Parallel() + + t.Run("invalid num of inner txs should error", func(t *testing.T) { + t.Parallel() + + tx := getDefaultTx() + args := createMockArgRelayedTxV3Processor() + args.MaxTransactionsAllowed = len(tx.InnerTransactions) - 1 + proc, err := transaction.NewRelayedTxV3Processor(args) + require.NoError(t, err) + + tx.Value = big.NewInt(1) + + err = proc.CheckRelayedTx(tx) + require.Equal(t, process.ErrRelayedTxV3TooManyInnerTransactions, err) + }) + t.Run("value on relayed tx should error", func(t *testing.T) { + t.Parallel() + + proc, err := transaction.NewRelayedTxV3Processor(createMockArgRelayedTxV3Processor()) + require.NoError(t, err) + + tx := getDefaultTx() + tx.Value = big.NewInt(1) + + err = proc.CheckRelayedTx(tx) + require.Equal(t, process.ErrRelayedTxV3ZeroVal, err) + }) + t.Run("relayed tx not to self should error", func(t *testing.T) { + t.Parallel() + + proc, err := transaction.NewRelayedTxV3Processor(createMockArgRelayedTxV3Processor()) + require.NoError(t, err) + + tx := getDefaultTx() + tx.RcvAddr = []byte("another rcv") + + err = proc.CheckRelayedTx(tx) + require.Equal(t, process.ErrRelayedTxV3SenderDoesNotMatchReceiver, err) + }) + t.Run("invalid gas limit should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgRelayedTxV3Processor() + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 { + return minGasLimit + }, + } + proc, err := transaction.NewRelayedTxV3Processor(args) + require.NoError(t, err) + + tx := getDefaultTx() + tx.GasLimit = minGasLimit + + err = proc.CheckRelayedTx(tx) + require.Equal(t, process.ErrRelayedTxV3GasLimitMismatch, err) + }) + t.Run("data field not empty should error", func(t *testing.T) { + t.Parallel() + + proc, err := transaction.NewRelayedTxV3Processor(createMockArgRelayedTxV3Processor()) + require.NoError(t, err) + + tx := getDefaultTx() + tx.Data = 
[]byte("dummy") + + err = proc.CheckRelayedTx(tx) + require.Equal(t, process.ErrRelayedTxV3InvalidDataField, err) + }) + t.Run("inner txs on inner should error", func(t *testing.T) { + t.Parallel() + + proc, err := transaction.NewRelayedTxV3Processor(createMockArgRelayedTxV3Processor()) + require.NoError(t, err) + + tx := getDefaultTx() + tx.InnerTransactions[0].InnerTransactions = []*coreTransaction.Transaction{{}} + + err = proc.CheckRelayedTx(tx) + require.Equal(t, process.ErrRecursiveRelayedTxIsNotAllowed, err) + }) + t.Run("relayer mismatch on inner should error", func(t *testing.T) { + t.Parallel() + + proc, err := transaction.NewRelayedTxV3Processor(createMockArgRelayedTxV3Processor()) + require.NoError(t, err) + + tx := getDefaultTx() + tx.InnerTransactions[0].RelayerAddr = []byte("another relayer") + + err = proc.CheckRelayedTx(tx) + require.Equal(t, process.ErrRelayedTxV3RelayerMismatch, err) + }) + t.Run("gas price mismatch on inner should error", func(t *testing.T) { + t.Parallel() + + proc, err := transaction.NewRelayedTxV3Processor(createMockArgRelayedTxV3Processor()) + require.NoError(t, err) + + tx := getDefaultTx() + tx.InnerTransactions[0].GasPrice = tx.GasPrice + 1 + + err = proc.CheckRelayedTx(tx) + require.Equal(t, process.ErrRelayedV3GasPriceMismatch, err) + }) + t.Run("shard mismatch on inner should error", func(t *testing.T) { + t.Parallel() + + tx := getDefaultTx() + args := createMockArgRelayedTxV3Processor() + args.ShardCoordinator = &testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + if bytes.Equal(address, tx.SndAddr) { + return 0 + } + + return 1 + }, + } + proc, err := transaction.NewRelayedTxV3Processor(args) + require.NoError(t, err) + + err = proc.CheckRelayedTx(tx) + require.Equal(t, process.ErrRelayedTxV3SenderShardMismatch, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + proc, err := transaction.NewRelayedTxV3Processor(createMockArgRelayedTxV3Processor()) + require.NoError(t, err) + + tx := getDefaultTx() + err = proc.CheckRelayedTx(tx) + require.NoError(t, err) + }) +} diff --git a/process/transaction/shardProcess.go b/process/transaction/shardProcess.go index 89b3572397b..129ad2c5db8 100644 --- a/process/transaction/shardProcess.go +++ b/process/transaction/shardProcess.go @@ -15,12 +15,13 @@ import ( "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + logger "github.com/multiversx/mx-chain-logger-go" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" - logger "github.com/multiversx/mx-chain-logger-go" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) var log = logger.GetOrCreate("process/transaction") @@ -37,38 +38,41 @@ type relayedFees struct { // txProcessor implements TransactionProcessor interface and can modify account states according to a transaction type txProcessor struct { *baseTxProcessor - txFeeHandler process.TransactionFeeHandler - txTypeHandler process.TxTypeHandler - receiptForwarder process.IntermediateTransactionHandler - badTxForwarder process.IntermediateTransactionHandler - argsParser 
process.ArgumentsParser - scrForwarder process.IntermediateTransactionHandler - signMarshalizer marshal.Marshalizer - enableEpochsHandler common.EnableEpochsHandler - txLogsProcessor process.TransactionLogProcessor + txFeeHandler process.TransactionFeeHandler + receiptForwarder process.IntermediateTransactionHandler + badTxForwarder process.IntermediateTransactionHandler + argsParser process.ArgumentsParser + scrForwarder process.IntermediateTransactionHandler + signMarshalizer marshal.Marshalizer + enableEpochsHandler common.EnableEpochsHandler + txLogsProcessor process.TransactionLogProcessor + relayedTxV3Processor process.RelayedTxV3Processor + failedTxLogsAccumulator process.FailedTxLogsAccumulator } // ArgsNewTxProcessor defines the arguments needed for new tx processor type ArgsNewTxProcessor struct { - Accounts state.AccountsAdapter - Hasher hashing.Hasher - PubkeyConv core.PubkeyConverter - Marshalizer marshal.Marshalizer - SignMarshalizer marshal.Marshalizer - ShardCoordinator sharding.Coordinator - ScProcessor process.SmartContractProcessor - TxFeeHandler process.TransactionFeeHandler - TxTypeHandler process.TxTypeHandler - EconomicsFee process.FeeHandler - ReceiptForwarder process.IntermediateTransactionHandler - BadTxForwarder process.IntermediateTransactionHandler - ArgsParser process.ArgumentsParser - ScrForwarder process.IntermediateTransactionHandler - EnableRoundsHandler process.EnableRoundsHandler - EnableEpochsHandler common.EnableEpochsHandler - TxVersionChecker process.TxVersionCheckerHandler - GuardianChecker process.GuardianChecker - TxLogsProcessor process.TransactionLogProcessor + Accounts state.AccountsAdapter + Hasher hashing.Hasher + PubkeyConv core.PubkeyConverter + Marshalizer marshal.Marshalizer + SignMarshalizer marshal.Marshalizer + ShardCoordinator sharding.Coordinator + ScProcessor process.SmartContractProcessor + TxFeeHandler process.TransactionFeeHandler + TxTypeHandler process.TxTypeHandler + EconomicsFee process.FeeHandler + ReceiptForwarder process.IntermediateTransactionHandler + BadTxForwarder process.IntermediateTransactionHandler + ArgsParser process.ArgumentsParser + ScrForwarder process.IntermediateTransactionHandler + EnableRoundsHandler process.EnableRoundsHandler + EnableEpochsHandler common.EnableEpochsHandler + TxVersionChecker process.TxVersionCheckerHandler + GuardianChecker process.GuardianChecker + TxLogsProcessor process.TransactionLogProcessor + RelayedTxV3Processor process.RelayedTxV3Processor + FailedTxLogsAccumulator process.FailedTxLogsAccumulator } // NewTxProcessor creates a new txProcessor engine @@ -128,6 +132,8 @@ func NewTxProcessor(args ArgsNewTxProcessor) (*txProcessor, error) { common.RelayedTransactionsFlag, common.RelayedTransactionsV2Flag, common.RelayedNonceFixFlag, + common.RelayedTransactionsV3Flag, + common.FixRelayedBaseCostFlag, }) if err != nil { return nil, err @@ -141,6 +147,12 @@ func NewTxProcessor(args ArgsNewTxProcessor) (*txProcessor, error) { if check.IfNil(args.TxLogsProcessor) { return nil, process.ErrNilTxLogsProcessor } + if check.IfNil(args.RelayedTxV3Processor) { + return nil, process.ErrNilRelayedTxV3Processor + } + if check.IfNil(args.FailedTxLogsAccumulator) { + return nil, process.ErrNilFailedTxLogsAccumulator + } baseTxProcess := &baseTxProcessor{ accounts: args.Accounts, @@ -153,19 +165,21 @@ func NewTxProcessor(args ArgsNewTxProcessor) (*txProcessor, error) { enableEpochsHandler: args.EnableEpochsHandler, txVersionChecker: args.TxVersionChecker, guardianChecker: args.GuardianChecker, + 
txTypeHandler: args.TxTypeHandler, } txProc := &txProcessor{ - baseTxProcessor: baseTxProcess, - txFeeHandler: args.TxFeeHandler, - txTypeHandler: args.TxTypeHandler, - receiptForwarder: args.ReceiptForwarder, - badTxForwarder: args.BadTxForwarder, - argsParser: args.ArgsParser, - scrForwarder: args.ScrForwarder, - signMarshalizer: args.SignMarshalizer, - enableEpochsHandler: args.EnableEpochsHandler, - txLogsProcessor: args.TxLogsProcessor, + baseTxProcessor: baseTxProcess, + txFeeHandler: args.TxFeeHandler, + receiptForwarder: args.ReceiptForwarder, + badTxForwarder: args.BadTxForwarder, + argsParser: args.ArgsParser, + scrForwarder: args.ScrForwarder, + signMarshalizer: args.SignMarshalizer, + enableEpochsHandler: args.EnableEpochsHandler, + txLogsProcessor: args.TxLogsProcessor, + relayedTxV3Processor: args.RelayedTxV3Processor, + failedTxLogsAccumulator: args.FailedTxLogsAccumulator, } return txProc, nil @@ -239,6 +253,8 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction) (vmco return txProc.processRelayedTx(tx, acntSnd, acntDst) case process.RelayedTxV2: return txProc.processRelayedTxV2(tx, acntSnd, acntDst) + case process.RelayedTxV3: + return txProc.processRelayedTxV3(tx, acntSnd) } return vmcommon.UserError, txProc.executingFailedTransaction(tx, acntSnd, process.ErrWrongTransaction) @@ -274,7 +290,7 @@ func (txProc *txProcessor) executeAfterFailedMoveBalanceTransaction( return nil } - err = txProc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{tx}) + err = txProc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{tx}, txHash) if err != nil { return err } @@ -300,13 +316,13 @@ func (txProc *txProcessor) executingFailedTransaction( return err } - acntSnd.IncreaseNonce(1) - err = txProc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{tx}) + txHash, err := core.CalculateHash(txProc.marshalizer, txProc.hasher, tx) if err != nil { return err } - txHash, err := core.CalculateHash(txProc.marshalizer, txProc.hasher, tx) + acntSnd.IncreaseNonce(1) + err = txProc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{tx}, txHash) if err != nil { return err } @@ -320,7 +336,7 @@ func (txProc *txProcessor) executingFailedTransaction( TxHash: txHash, } - err = txProc.receiptForwarder.AddIntermediateTransactions([]data.TransactionHandler{rpt}) + err = txProc.receiptForwarder.AddIntermediateTransactions([]data.TransactionHandler{rpt}, txHash) if err != nil { return err } @@ -366,7 +382,7 @@ func (txProc *txProcessor) createReceiptWithReturnedGas( TxHash: txHash, } - err := txProc.receiptForwarder.AddIntermediateTransactions([]data.TransactionHandler{rpt}) + err := txProc.receiptForwarder.AddIntermediateTransactions([]data.TransactionHandler{rpt}, txHash) if err != nil { return err } @@ -385,7 +401,8 @@ func (txProc *txProcessor) processTxFee( } if isUserTxOfRelayed { - totalCost := txProc.economicsFee.ComputeFeeForProcessing(tx, tx.GasLimit) + totalCost := txProc.computeInnerTxFee(tx) + err := acntSnd.SubFromBalance(totalCost) if err != nil { return nil, nil, err @@ -397,6 +414,10 @@ func (txProc *txProcessor) processTxFee( moveBalanceGasLimit := txProc.economicsFee.ComputeGasLimit(tx) currentShardFee := txProc.economicsFee.ComputeFeeForProcessing(tx, moveBalanceGasLimit) + if txProc.enableEpochsHandler.IsFlagEnabled(common.FixRelayedBaseCostFlag) { + currentShardFee = txProc.economicsFee.ComputeMoveBalanceFee(tx) + } + return currentShardFee, totalCost, nil } @@ -558,8 +579,8 @@ func (txProc 
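// The processTxFee hunk above changes how the current-shard part of a relayed
// user transaction's fee is derived: before the base-cost fix it is priced as
// processing fee over the move-balance gas, afterwards it is the plain
// move-balance fee. A compact sketch of that branch, with a hypothetical fee
// model standing in for the economics handler:
package main

import (
	"fmt"
	"math/big"
)

type feeModelSketch struct {
	gasPrice            uint64 // full price paid for move-balance gas
	processingPrice     uint64 // reduced price applied to processing gas
	moveBalanceGasLimit uint64
}

func (f feeModelSketch) moveBalanceFee() *big.Int {
	return new(big.Int).SetUint64(f.gasPrice * f.moveBalanceGasLimit)
}

func (f feeModelSketch) feeForProcessing(gas uint64) *big.Int {
	return new(big.Int).SetUint64(f.processingPrice * gas)
}

// currentShardFeeSketch mirrors the branch guarded by the base-cost fix flag.
func currentShardFeeSketch(f feeModelSketch, fixEnabled bool) *big.Int {
	if fixEnabled {
		return f.moveBalanceFee()
	}
	return f.feeForProcessing(f.moveBalanceGasLimit)
}

func main() {
	f := feeModelSketch{gasPrice: 1000, processingPrice: 10, moveBalanceGasLimit: 50000}
	fmt.Println(currentShardFeeSketch(f, false)) // 500000
	fmt.Println(currentShardFeeSketch(f, true))  // 50000000
}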
*txProcessor) finishExecutionOfRelayedTx( tx *transaction.Transaction, userTx *transaction.Transaction, ) (vmcommon.ReturnCode, error) { - computedFees := txProc.computeRelayedTxFees(tx) - txHash, err := txProc.processTxAtRelayer(relayerAcnt, computedFees.totalFee, computedFees.relayerFee, tx) + computedFees := txProc.computeRelayedTxFees(tx, userTx) + err := txProc.processTxAtRelayer(relayerAcnt, computedFees.totalFee, computedFees.relayerFee, tx) if err != nil { return 0, err } @@ -568,12 +589,31 @@ func (txProc *txProcessor) finishExecutionOfRelayedTx( return vmcommon.Ok, nil } - err = txProc.addFeeAndValueToDest(acntDst, tx, computedFees.remainingFee) + err = txProc.addFeeAndValueToDest(acntDst, tx.Value, computedFees.remainingFee) if err != nil { return 0, err } - return txProc.processUserTx(tx, userTx, tx.Value, tx.Nonce, txHash) + originalTxHash, err := core.CalculateHash(txProc.marshalizer, txProc.hasher, tx) + if err != nil { + errRemove := txProc.removeValueAndConsumedFeeFromUser(userTx, tx.Value, originalTxHash, tx, err) + if errRemove != nil { + return vmcommon.UserError, errRemove + } + + return vmcommon.UserError, txProc.executeFailedRelayedUserTx( + userTx, + tx.SndAddr, + tx.Value, + tx.Nonce, + tx, + originalTxHash, + err.Error()) + } + + defer txProc.saveFailedLogsIfNeeded(originalTxHash) + + return txProc.processUserTx(tx, userTx, tx.Value, tx.Nonce, originalTxHash) } func (txProc *txProcessor) processTxAtRelayer( @@ -581,37 +621,37 @@ func (txProc *txProcessor) processTxAtRelayer( totalFee *big.Int, relayerFee *big.Int, tx *transaction.Transaction, -) ([]byte, error) { - txHash, err := core.CalculateHash(txProc.marshalizer, txProc.hasher, tx) - if err != nil { - return nil, err - } - +) error { if !check.IfNil(relayerAcnt) { - err = relayerAcnt.SubFromBalance(tx.GetValue()) + err := relayerAcnt.SubFromBalance(tx.GetValue()) if err != nil { - return nil, err + return err } err = relayerAcnt.SubFromBalance(totalFee) if err != nil { - return nil, err + return err } relayerAcnt.IncreaseNonce(1) err = txProc.accounts.SaveAccount(relayerAcnt) if err != nil { - return nil, err + return err + } + + txHash, err := core.CalculateHash(txProc.marshalizer, txProc.hasher, tx) + if err != nil { + return err } txProc.txFeeHandler.ProcessTransactionFee(relayerFee, big.NewInt(0), txHash) } - return txHash, nil + return nil } -func (txProc *txProcessor) addFeeAndValueToDest(acntDst state.UserAccountHandler, tx *transaction.Transaction, remainingFee *big.Int) error { - err := acntDst.AddToBalance(tx.GetValue()) +func (txProc *txProcessor) addFeeAndValueToDest(acntDst state.UserAccountHandler, txValue *big.Int, remainingFee *big.Int) error { + err := acntDst.AddToBalance(txValue) if err != nil { return err } @@ -624,6 +664,128 @@ func (txProc *txProcessor) addFeeAndValueToDest(acntDst state.UserAccountHandler return txProc.accounts.SaveAccount(acntDst) } +func (txProc *txProcessor) processRelayedTxV3( + tx *transaction.Transaction, + relayerAcnt state.UserAccountHandler, +) (vmcommon.ReturnCode, error) { + if !txProc.enableEpochsHandler.IsFlagEnabled(common.RelayedTransactionsV3Flag) { + return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrRelayedTxV3Disabled) + } + if check.IfNil(relayerAcnt) { + return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrNilRelayerAccount) + } + err := txProc.relayedTxV3Processor.CheckRelayedTx(tx) + if err != nil { + return vmcommon.UserError, txProc.executingFailedTransaction(tx, 
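// From this point the v3 path takes a journal snapshot, charges the relayer
// the whole fee up front, executes every inner transaction independently while
// summing the fees they consume, and finally reverts everything if the
// consumed total exceeds what the relayer was charged for. A condensed sketch
// of that control flow, with simplified types and hypothetical helper names:
package main

import (
	"errors"
	"fmt"
	"math/big"
)

type innerOutcome struct {
	fee *big.Int
	ok  bool
}

func processRelayedV3Sketch(totalFee, relayerFee *big.Int, runInner []func() innerOutcome, revert func()) error {
	consumed := big.NewInt(0)
	succeeded := 0
	for _, run := range runInner {
		outcome := run()
		consumed.Add(consumed, outcome.fee)
		if outcome.ok {
			succeeded++
		}
	}
	if succeeded < len(runInner) {
		// a failed inner tx does not fail the relayed tx; its logs are flushed later
		fmt.Println("executed", succeeded, "of", len(runInner), "inner transactions")
	}

	maxInnerFees := new(big.Int).Sub(totalFee, relayerFee)
	if consumed.Cmp(maxInnerFees) > 0 {
		revert()
		return errors.New("consumed fees mismatch")
	}
	return nil
}

func main() {
	run := func(fee int64, ok bool) func() innerOutcome {
		return func() innerOutcome { return innerOutcome{fee: big.NewInt(fee), ok: ok} }
	}
	err := processRelayedV3Sketch(
		big.NewInt(10),
		big.NewInt(2),
		[]func() innerOutcome{run(4, true), run(4, false)},
		func() { fmt.Println("reverting to snapshot") },
	)
	fmt.Println(err) // <nil>, since 4+4 does not exceed 10-2
}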
relayerAcnt, err) + } + + snapshot := txProc.accounts.JournalLen() + + // process fees on both relayer and sender + relayerFee, totalFee, err := txProc.economicsFee.ComputeRelayedTxFees(tx) + if err != nil { + return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, err) + } + + err = txProc.processTxAtRelayer(relayerAcnt, totalFee, relayerFee, tx) + if err != nil { + return 0, err + } + + innerTxs := tx.GetInnerTransactions() + + originalTxHash, err := core.CalculateHash(txProc.marshalizer, txProc.hasher, tx) + if err != nil { + return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, err) + } + + var innerTxRetCode vmcommon.ReturnCode + var innerTxErr error + var innerTxFee *big.Int + innerTxsTotalFees := big.NewInt(0) + executedUserTxs := make([]*transaction.Transaction, 0) + for _, innerTx := range innerTxs { + innerTxFee, innerTxRetCode, innerTxErr = txProc.processInnerTx(tx, innerTx, originalTxHash) + innerTxsTotalFees.Add(innerTxsTotalFees, innerTxFee) + if innerTxErr != nil || innerTxRetCode != vmcommon.Ok { + continue + } + + executedUserTxs = append(executedUserTxs, innerTx) + } + + allUserTxsSucceeded := len(executedUserTxs) == len(innerTxs) && innerTxErr == nil && innerTxRetCode == vmcommon.Ok + if !allUserTxsSucceeded { + log.Trace("failed to execute all inner transactions", "total", len(innerTxs), "executed transactions", len(executedUserTxs)) + + txProc.saveFailedLogsIfNeeded(originalTxHash) + } + + expectedMaxInnerTxsTotalFees := big.NewInt(0).Sub(totalFee, relayerFee) + if innerTxsTotalFees.Cmp(expectedMaxInnerTxsTotalFees) > 0 { + log.Debug("reverting relayed transaction, total inner transactions fees mismatch", + "computed max fees at relayer", expectedMaxInnerTxsTotalFees.Uint64(), + "total inner fees consumed", innerTxsTotalFees.Uint64()) + + errRevert := txProc.accounts.RevertToSnapshot(snapshot) + if errRevert != nil { + return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, errRevert) + } + + return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrConsumedFeesMismatch) + } + + return vmcommon.Ok, nil +} + +func (txProc *txProcessor) processInnerTx( + tx *transaction.Transaction, + innerTx *transaction.Transaction, + originalTxHash []byte, +) (*big.Int, vmcommon.ReturnCode, error) { + + txFee := txProc.computeInnerTxFee(innerTx) + + acntSnd, err := txProc.getAccountFromAddress(innerTx.SndAddr) + if err != nil { + return txFee, vmcommon.UserError, txProc.executeFailedRelayedUserTx( + innerTx, + innerTx.RelayerAddr, + big.NewInt(0), + tx.Nonce, + tx, + originalTxHash, + err.Error()) + } + + if check.IfNil(acntSnd) { + return txFee, vmcommon.UserError, txProc.executeFailedRelayedUserTx( + innerTx, + innerTx.RelayerAddr, + big.NewInt(0), + tx.Nonce, + tx, + originalTxHash, + process.ErrRelayedTxV3SenderShardMismatch.Error()) + } + + // TODO: remove adding and then removing the fee at the sender + err = txProc.addFeeAndValueToDest(acntSnd, big.NewInt(0), txFee) + if err != nil { + return txFee, vmcommon.UserError, txProc.executeFailedRelayedUserTx( + innerTx, + innerTx.RelayerAddr, + big.NewInt(0), + tx.Nonce, + tx, + originalTxHash, + err.Error()) + } + + result, err := txProc.processUserTx(tx, innerTx, tx.Value, tx.Nonce, originalTxHash) + return txFee, result, err +} + func (txProc *txProcessor) processRelayedTxV2( tx *transaction.Transaction, relayerAcnt, acntDst state.UserAccountHandler, @@ -643,6 +805,10 @@ func (txProc *txProcessor) processRelayedTxV2( return 
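// processInnerTx above reports the fee computed for the inner transaction in
// every branch, success or failure, so that the caller's loop can keep its fee
// accounting aligned with what the relayer already paid. A small sketch of
// that contract with simplified types (the temporary credit mirrors the TODO
// noted in the code):
package main

import (
	"errors"
	"fmt"
	"math/big"
)

// runInnerSketch credits the pre-computed fee to the inner sender and then
// tries to execute; the fee is returned to the caller whether or not the
// execution succeeded.
func runInnerSketch(senderBalance, fee *big.Int, execute func() error) (*big.Int, error) {
	senderBalance.Add(senderBalance, fee) // temporarily credited; execution debits it again
	if err := execute(); err != nil {
		return fee, err
	}
	return fee, nil
}

func main() {
	balance := big.NewInt(100)
	fee := big.NewInt(7)

	consumed, err := runInnerSketch(balance, fee, func() error {
		balance.Sub(balance, fee) // the user transaction pays its own fee during execution
		return nil
	})
	fmt.Println(consumed, err, balance) // 7 <nil> 100

	consumed, err = runInnerSketch(balance, fee, func() error { return errors.New("inner execution failed") })
	fmt.Println(consumed, err) // 7 inner execution failed
}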
vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrInvalidArguments) } + if len(tx.InnerTransactions) > 0 { + return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrMultipleRelayedTxTypesIsNotAllowed) + } + userTx := makeUserTxFromRelayedTxV2Args(args) userTx.GasPrice = tx.GasPrice userTx.GasLimit = tx.GasLimit - txProc.economicsFee.ComputeGasLimit(tx) @@ -666,6 +832,9 @@ func (txProc *txProcessor) processRelayedTx( if !txProc.enableEpochsHandler.IsFlagEnabled(common.RelayedTransactionsFlag) { return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrRelayedTxDisabled) } + if len(tx.InnerTransactions) > 0 { + return vmcommon.UserError, txProc.executingFailedTransaction(tx, relayerAcnt, process.ErrMultipleRelayedTxTypesIsNotAllowed) + } userTx := &transaction.Transaction{} err = txProc.signMarshalizer.Unmarshal(userTx, args[0]) @@ -691,9 +860,14 @@ func (txProc *txProcessor) processRelayedTx( return txProc.finishExecutionOfRelayedTx(relayerAcnt, acntDst, tx, userTx) } -func (txProc *txProcessor) computeRelayedTxFees(tx *transaction.Transaction) relayedFees { +func (txProc *txProcessor) computeRelayedTxFees(tx, userTx *transaction.Transaction) relayedFees { relayerFee := txProc.economicsFee.ComputeMoveBalanceFee(tx) totalFee := txProc.economicsFee.ComputeTxFee(tx) + if txProc.enableEpochsHandler.IsFlagEnabled(common.FixRelayedBaseCostFlag) { + userFee := txProc.computeInnerTxFeeAfterBaseCostFix(userTx) + + totalFee = totalFee.Add(relayerFee, userFee) + } remainingFee := big.NewInt(0).Sub(totalFee, relayerFee) computedFees := relayedFees{ @@ -724,7 +898,8 @@ func (txProc *txProcessor) removeValueAndConsumedFeeFromUser( return err } - consumedFee := txProc.economicsFee.ComputeFeeForProcessing(userTx, userTx.GasLimit) + consumedFee := txProc.computeInnerTxFee(userTx) + err = userAcnt.SubFromBalance(consumedFee) if err != nil { return err @@ -757,7 +932,8 @@ func (txProc *txProcessor) addNonExecutableLog(executionErr error, originalTxHas Address: originalTx.GetRcvAddr(), } - return txProc.txLogsProcessor.SaveLog(originalTxHash, originalTx, []*vmcommon.LogEntry{logEntry}) + return txProc.failedTxLogsAccumulator.SaveLogs(originalTxHash, originalTx, []*vmcommon.LogEntry{logEntry}) + } func (txProc *txProcessor) processMoveBalanceCostRelayedUserTx( @@ -768,6 +944,9 @@ func (txProc *txProcessor) processMoveBalanceCostRelayedUserTx( ) error { moveBalanceGasLimit := txProc.economicsFee.ComputeGasLimit(userTx) moveBalanceUserFee := txProc.economicsFee.ComputeFeeForProcessing(userTx, moveBalanceGasLimit) + if txProc.enableEpochsHandler.IsFlagEnabled(common.FixRelayedBaseCostFlag) { + moveBalanceUserFee = txProc.economicsFee.ComputeMoveBalanceFee(userTx) + } userScrHash, err := core.CalculateHash(txProc.marshalizer, txProc.hasher, userScr) if err != nil { @@ -783,21 +962,26 @@ func (txProc *txProcessor) processUserTx( userTx *transaction.Transaction, relayedTxValue *big.Int, relayedNonce uint64, - txHash []byte, + originalTxHash []byte, ) (vmcommon.ReturnCode, error) { + relayerAdr := originalTx.SndAddr acntSnd, acntDst, err := txProc.getAccounts(userTx.SndAddr, userTx.RcvAddr) if err != nil { - return 0, err - } - - var originalTxHash []byte - originalTxHash, err = core.CalculateHash(txProc.marshalizer, txProc.hasher, originalTx) - if err != nil { - return 0, err + errRemove := txProc.removeValueAndConsumedFeeFromUser(userTx, relayedTxValue, originalTxHash, originalTx, err) + if errRemove != nil { + return 
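// computeRelayedTxFees above rebuilds the total fee once the base-cost fix is
// active: the relayer pays its own move-balance fee and, on top of it, the fee
// computed for the embedded user transaction; the remainder forwarded to the
// destination is the total minus the relayer's part. A small numeric sketch
// with made-up amounts:
package main

import (
	"fmt"
	"math/big"
)

func relayedFeesSketch(relayerMoveBalanceFee, legacyTotalFee, userFee *big.Int, fixEnabled bool) (relayerFee, remainingFee *big.Int) {
	totalFee := new(big.Int).Set(legacyTotalFee)
	if fixEnabled {
		totalFee = new(big.Int).Add(relayerMoveBalanceFee, userFee)
	}
	remainingFee = new(big.Int).Sub(totalFee, relayerMoveBalanceFee)
	return relayerMoveBalanceFee, remainingFee
}

func main() {
	relayerFee, remainingFee := relayedFeesSketch(big.NewInt(50), big.NewInt(200), big.NewInt(120), true)
	fmt.Println(relayerFee, remainingFee) // 50 120
}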
vmcommon.UserError, errRemove + } + return vmcommon.UserError, txProc.executeFailedRelayedUserTx( + userTx, + relayerAdr, + relayedTxValue, + relayedNonce, + originalTx, + originalTxHash, + err.Error()) } - relayerAdr := originalTx.SndAddr txType, dstShardTxType := txProc.txTypeHandler.ComputeTransactionType(userTx) err = txProc.checkTxValues(userTx, acntSnd, acntDst, true) if err != nil { @@ -811,11 +995,11 @@ func (txProc *txProcessor) processUserTx( relayedTxValue, relayedNonce, originalTx, - txHash, + originalTxHash, err.Error()) } - scrFromTx, err := txProc.makeSCRFromUserTx(userTx, relayerAdr, relayedTxValue, txHash) + scrFromTx, err := txProc.makeSCRFromUserTx(userTx, relayerAdr, relayedTxValue, originalTxHash) if err != nil { return 0, err } @@ -824,6 +1008,10 @@ func (txProc *txProcessor) processUserTx( switch txType { case process.MoveBalance: err = txProc.processMoveBalance(userTx, acntSnd, acntDst, dstShardTxType, originalTxHash, true) + intraShard := txProc.shardCoordinator.SameShard(userTx.SndAddr, userTx.RcvAddr) + if err == nil && intraShard { + txProc.createCompleteEventLog(scrFromTx, originalTxHash) + } case process.SCDeployment: err = txProc.processMoveBalanceCostRelayedUserTx(userTx, scrFromTx, acntSnd, originalTxHash) if err != nil { @@ -857,7 +1045,7 @@ func (txProc *txProcessor) processUserTx( relayedTxValue, relayedNonce, originalTx, - txHash, + originalTxHash, err.Error()) } @@ -868,7 +1056,7 @@ func (txProc *txProcessor) processUserTx( relayedTxValue, relayedNonce, originalTx, - txHash, + originalTxHash, err.Error()) } @@ -890,7 +1078,7 @@ func (txProc *txProcessor) processUserTx( return returnCode, nil } - err = txProc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrFromTx}) + err = txProc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrFromTx}, originalTxHash) if err != nil { return 0, err } @@ -963,17 +1151,28 @@ func (txProc *txProcessor) executeFailedRelayedUserTx( return err } - err = txProc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrForRelayer}) + err = txProc.scrForwarder.AddIntermediateTransactions([]data.TransactionHandler{scrForRelayer}, originalTxHash) if err != nil { return err } totalFee := txProc.economicsFee.ComputeFeeForProcessing(userTx, userTx.GasLimit) + moveBalanceGasLimit := txProc.economicsFee.ComputeGasLimit(userTx) + gasToUse := userTx.GetGasLimit() - moveBalanceGasLimit + processingUserFee := txProc.economicsFee.ComputeFeeForProcessing(userTx, gasToUse) + if txProc.enableEpochsHandler.IsFlagEnabled(common.FixRelayedBaseCostFlag) { + moveBalanceUserFee := txProc.economicsFee.ComputeMoveBalanceFee(userTx) + totalFee = big.NewInt(0).Add(moveBalanceUserFee, processingUserFee) + } + senderShardID := txProc.shardCoordinator.ComputeId(userTx.SndAddr) if senderShardID != txProc.shardCoordinator.SelfId() { - moveBalanceGasLimit := txProc.economicsFee.ComputeGasLimit(userTx) - moveBalanceUserFee := txProc.economicsFee.ComputeFeeForProcessing(userTx, moveBalanceGasLimit) - totalFee.Sub(totalFee, moveBalanceUserFee) + if txProc.enableEpochsHandler.IsFlagEnabled(common.FixRelayedBaseCostFlag) { + totalFee.Sub(totalFee, processingUserFee) + } else { + moveBalanceUserFee := txProc.economicsFee.ComputeFeeForProcessing(userTx, moveBalanceGasLimit) + totalFee.Sub(totalFee, moveBalanceUserFee) + } } txProc.txFeeHandler.ProcessTransactionFee(totalFee, big.NewInt(0), originalTxHash) @@ -984,8 +1183,8 @@ func (txProc *txProcessor) executeFailedRelayedUserTx( return err } - if 
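// executeFailedRelayedUserTx above splits the fee charged for a failed user
// transaction into a move-balance part and a processing part once the
// base-cost fix is active, and keeps only the move-balance part on this shard
// when the user sender lives in another shard. A sketch of that arithmetic
// with illustrative amounts:
package main

import (
	"fmt"
	"math/big"
)

func failedUserTxFeeSketch(moveBalanceFee, processingFee *big.Int, senderIsLocal bool) *big.Int {
	totalFee := new(big.Int).Add(moveBalanceFee, processingFee)
	if !senderIsLocal {
		// only the move-balance part stays accounted on this shard in this sketch
		totalFee.Sub(totalFee, processingFee)
	}
	return totalFee
}

func main() {
	fmt.Println(failedUserTxFeeSketch(big.NewInt(50), big.NewInt(70), true))  // 120
	fmt.Println(failedUserTxFeeSketch(big.NewInt(50), big.NewInt(70), false)) // 50
}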
txProc.enableEpochsHandler.IsFlagEnabled(common.AddFailedRelayedTxToInvalidMBsFlag) { - err = txProc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{originalTx}) + if txProc.enableEpochsHandler.IsFlagEnabled(common.AddFailedRelayedTxToInvalidMBsFlag) && !isRelayedV3(originalTx.InnerTransactions) { + err = txProc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{originalTx}, originalTxHash) if err != nil { return err } @@ -1018,6 +1217,36 @@ func isNonExecutableError(executionErr error) bool { errors.Is(executionErr, process.ErrTransactionNotExecutable) } +func (txProc *txProcessor) createCompleteEventLog(scr data.TransactionHandler, originalTxHash []byte) { + scrHash, err := core.CalculateHash(txProc.marshalizer, txProc.hasher, scr) + if err != nil { + scrHash = originalTxHash + } + + completedTxLog := &vmcommon.LogEntry{ + Identifier: []byte(core.CompletedTxEventIdentifier), + Address: scr.GetRcvAddr(), + Topics: [][]byte{scrHash}, + } + + ignorableError := txProc.txLogsProcessor.SaveLog(scrHash, scr, []*vmcommon.LogEntry{completedTxLog}) + if ignorableError != nil { + log.Debug("txProcessor.createCompleteEventLog txLogsProcessor.SaveLog()", "error", ignorableError.Error()) + } +} + +func (txProc *txProcessor) saveFailedLogsIfNeeded(originalTxHash []byte) { + logsTx, logs, ok := txProc.failedTxLogsAccumulator.GetLogs(originalTxHash) + if ok { + ignorableErr := txProc.txLogsProcessor.SaveLog(originalTxHash, logsTx, logs) + if ignorableErr != nil { + log.Debug("txLogsProcessor.SaveLog failed", "error", ignorableErr.Error()) + } + } + + txProc.failedTxLogsAccumulator.Remove(originalTxHash) +} + // IsInterfaceNil returns true if there is no value under the interface func (txProc *txProcessor) IsInterfaceNil() bool { return txProc == nil diff --git a/process/transaction/shardProcess_test.go b/process/transaction/shardProcess_test.go index b79b8b21ffc..1f077525ae2 100644 --- a/process/transaction/shardProcess_test.go +++ b/process/transaction/shardProcess_test.go @@ -13,6 +13,12 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-vm-common-go/builtInFunctions" + "github.com/multiversx/mx-chain-vm-common-go/parsers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/coordinator" @@ -26,14 +32,13 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/vm" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/multiversx/mx-chain-vm-common-go/builtInFunctions" - "github.com/multiversx/mx-chain-vm-common-go/parsers" - 
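// saveFailedLogsIfNeeded above drains whatever the failed-logs accumulator has
// collected under the relayed transaction's hash, hands it to the log
// processor in a single call and then clears the entry. A minimal sketch of
// such an accumulator with simplified types (not the repository's interface):
package main

import "fmt"

type logEntrySketch struct{ identifier string }

type failedLogsAccumulatorSketch struct {
	logs map[string][]logEntrySketch
}

func (a *failedLogsAccumulatorSketch) SaveLogs(txHash string, entries ...logEntrySketch) {
	a.logs[txHash] = append(a.logs[txHash], entries...)
}

func (a *failedLogsAccumulatorSketch) GetLogs(txHash string) ([]logEntrySketch, bool) {
	entries, ok := a.logs[txHash]
	return entries, ok
}

func (a *failedLogsAccumulatorSketch) Remove(txHash string) {
	delete(a.logs, txHash)
}

func main() {
	acc := &failedLogsAccumulatorSketch{logs: map[string][]logEntrySketch{}}
	acc.SaveLogs("originalTxHash", logEntrySketch{identifier: "signalError"})

	if entries, ok := acc.GetLogs("originalTxHash"); ok {
		fmt.Println("flushing", len(entries), "accumulated log entries")
	}
	acc.Remove("originalTxHash")
}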
"github.com/stretchr/testify/assert" ) +var txHash = []byte("hash") + func generateRandomByteSlice(size int) []byte { buff := make([]byte, size) _, _ = rand.Reader.Read(buff) @@ -74,25 +79,27 @@ func createAccountStub(sndAddr, rcvAddr []byte, func createArgsForTxProcessor() txproc.ArgsNewTxProcessor { args := txproc.ArgsNewTxProcessor{ - Accounts: &stateMock.AccountsStub{}, - Hasher: &hashingMocks.HasherMock{}, - PubkeyConv: createMockPubKeyConverter(), - Marshalizer: &mock.MarshalizerMock{}, - SignMarshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - ScProcessor: &testscommon.SCProcessorMock{}, - TxFeeHandler: &mock.FeeAccumulatorStub{}, - TxTypeHandler: &testscommon.TxTypeHandlerMock{}, - EconomicsFee: feeHandlerMock(), - ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, - BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, - ArgsParser: &mock.ArgumentParserMock{}, - ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag), - GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - TxLogsProcessor: &mock.TxLogsProcessorStub{}, - EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, + Accounts: &stateMock.AccountsStub{}, + Hasher: &hashingMocks.HasherMock{}, + PubkeyConv: createMockPubKeyConverter(), + Marshalizer: &mock.MarshalizerMock{}, + SignMarshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + ScProcessor: &testscommon.SCProcessorMock{}, + TxFeeHandler: &mock.FeeAccumulatorStub{}, + TxTypeHandler: &testscommon.TxTypeHandlerMock{}, + EconomicsFee: feeHandlerMock(), + ReceiptForwarder: &mock.IntermediateTransactionHandlerMock{}, + BadTxForwarder: &mock.IntermediateTransactionHandlerMock{}, + ArgsParser: &testscommon.ArgumentParserMock{}, + ScrForwarder: &mock.IntermediateTransactionHandlerMock{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag, common.FixRelayedBaseCostFlag), + GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxLogsProcessor: &mock.TxLogsProcessorStub{}, + EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, + RelayedTxV3Processor: &processMocks.RelayedTxV3ProcessorMock{}, + FailedTxLogsAccumulator: &processMocks.FailedTxLogsAccumulatorMock{}, } return args } @@ -102,7 +109,7 @@ func createTxProcessor() txproc.TxProcessor { return txProc } -//------- NewTxProcessor +// ------- NewTxProcessor func TestNewTxProcessor_NilAccountsShouldErr(t *testing.T) { t.Parallel() @@ -302,6 +309,50 @@ func TestNewTxProcessor_NilEnableRoundsHandlerShouldErr(t *testing.T) { assert.Nil(t, txProc) } +func TestNewTxProcessor_NilTxVersionCheckerShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForTxProcessor() + args.TxVersionChecker = nil + txProc, err := txproc.NewTxProcessor(args) + + assert.Equal(t, process.ErrNilTransactionVersionChecker, err) + assert.Nil(t, txProc) +} + +func TestNewTxProcessor_NilGuardianCheckerShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForTxProcessor() + args.GuardianChecker = nil + txProc, err := txproc.NewTxProcessor(args) + + assert.Equal(t, process.ErrNilGuardianChecker, err) + assert.Nil(t, txProc) +} + +func TestNewTxProcessor_NilRelayedTxV3ProcessorShouldErr(t *testing.T) { + t.Parallel() + + args := 
createArgsForTxProcessor() + args.RelayedTxV3Processor = nil + txProc, err := txproc.NewTxProcessor(args) + + assert.Equal(t, process.ErrNilRelayedTxV3Processor, err) + assert.Nil(t, txProc) +} + +func TestNewTxProcessor_NilFailedTxLogsAccumulatorShouldErr(t *testing.T) { + t.Parallel() + + args := createArgsForTxProcessor() + args.FailedTxLogsAccumulator = nil + txProc, err := txproc.NewTxProcessor(args) + + assert.Equal(t, process.ErrNilFailedTxLogsAccumulator, err) + assert.Nil(t, txProc) +} + func TestNewTxProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -312,7 +363,7 @@ func TestNewTxProcessor_OkValsShouldWork(t *testing.T) { assert.NotNil(t, txProc) } -//------- getAccounts +// ------- getAccounts func TestTxProcessor_GetAccountsShouldErrNilAddressContainer(t *testing.T) { t.Parallel() @@ -479,7 +530,7 @@ func TestTxProcessor_GetSameAccountShouldWork(t *testing.T) { assert.True(t, a1 == a2) } -//------- checkTxValues +// ------- checkTxValues func TestTxProcessor_CheckTxValuesHigherNonceShouldErr(t *testing.T) { t.Parallel() @@ -602,7 +653,7 @@ func TestTxProcessor_CheckTxValuesOkValsShouldErr(t *testing.T) { assert.Nil(t, err) } -//------- increaseNonce +// ------- increaseNonce func TestTxProcessor_IncreaseNonceOkValsShouldWork(t *testing.T) { t.Parallel() @@ -618,7 +669,7 @@ func TestTxProcessor_IncreaseNonceOkValsShouldWork(t *testing.T) { assert.Equal(t, uint64(46), acntSrc.GetNonce()) } -//------- ProcessTransaction +// ------- ProcessTransaction func TestTxProcessor_ProcessTransactionNilTxShouldErr(t *testing.T) { t.Parallel() @@ -651,7 +702,7 @@ func TestTxProcessor_ProcessTransactionMalfunctionAccountsShouldErr(t *testing.T func TestTxProcessor_ProcessCheckNotPassShouldErr(t *testing.T) { t.Parallel() - //these values will trigger ErrHigherNonceInTransaction + // these values will trigger ErrHigherNonceInTransaction tx := transaction.Transaction{} tx.Nonce = 1 tx.SndAddr = []byte("SRC") @@ -1464,8 +1515,8 @@ func TestTxProcessor_ProcessTxFeeMoveBalanceUserTx(t *testing.T) { cost, totalCost, err := execTx.ProcessTxFee(tx, acntSnd, nil, process.MoveBalance, true) assert.Nil(t, err) - assert.True(t, cost.Cmp(processingFee) == 0) - assert.True(t, totalCost.Cmp(processingFee) == 0) + assert.True(t, cost.Cmp(moveBalanceFee) == 0) + assert.True(t, totalCost.Cmp(moveBalanceFee) == 0) } func TestTxProcessor_ProcessTxFeeSCInvokeUserTx(t *testing.T) { @@ -1476,6 +1527,7 @@ func TestTxProcessor_ProcessTxFeeSCInvokeUserTx(t *testing.T) { negMoveBalanceFee := big.NewInt(0).Neg(moveBalanceFee) gasPerByte := uint64(1) args := createArgsForTxProcessor() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.PenalizedTooMuchGasFlag) args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { return moveBalanceFee @@ -1834,7 +1886,7 @@ func TestTxProcessor_ProcessRelayedTransactionV2ArgsParserShouldErr(t *testing.T parseError := errors.New("parse error") args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return "", nil, parseError }} @@ -2013,6 +2065,530 @@ func TestTxProcessor_ProcessRelayedTransactionV2(t *testing.T) { assert.Equal(t, vmcommon.Ok, returnCode) } +func TestTxProcessor_ProcessRelayedTransactionV3(t *testing.T) { + t.Parallel() + + marshaller := &mock.MarshalizerMock{} + + userAddr := []byte("user") + tx := 
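// The test below assembles the relayed v3 pair by hand: the relayer
// transaction carries zero value, is sent to itself, has no data field, and
// references the user transaction through InnerTransactions, while each inner
// transaction points back at the relayer through RelayerAddr. A compact sketch
// of that shape with a simplified struct (the extra move-balance gas of 4 is
// an assumption matching the numbers used in the test):
package main

import "fmt"

type txShapeSketch struct {
	SndAddr, RcvAddr, RelayerAddr string
	Value                         int64
	GasPrice, GasLimit            uint64
	Data                          string
	InnerTransactions             []*txShapeSketch
}

func newRelayedV3Sketch(relayer string, inner ...*txShapeSketch) *txShapeSketch {
	var innerGas uint64
	for _, in := range inner {
		in.RelayerAddr = relayer
		innerGas += in.GasLimit
	}
	return &txShapeSketch{
		SndAddr:           relayer,
		RcvAddr:           relayer, // sent to self
		Value:             0,
		GasPrice:          1,
		GasLimit:          innerGas + 4, // inner gas plus an assumed move-balance cost
		InnerTransactions: inner,
	}
}

func main() {
	userTx := &txShapeSketch{SndAddr: "user", RcvAddr: "dest", GasPrice: 1, GasLimit: 4, Data: "execute@param1"}
	relayed := newRelayedV3Sketch("relayer", userTx)
	fmt.Println(relayed.GasLimit, relayed.InnerTransactions[0].RelayerAddr) // 8 relayer
}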
&transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("sSRC") + tx.RcvAddr = []byte("sSRC") + tx.Value = big.NewInt(0) + tx.GasPrice = 1 + tx.GasLimit = 8 + + userTx := &transaction.Transaction{} + userTx.Nonce = 0 + userTx.SndAddr = userAddr + userTx.RcvAddr = []byte("sDST") + userTx.Value = big.NewInt(0) + userTx.Data = []byte("execute@param1") + userTx.GasPrice = 1 + userTx.GasLimit = 4 + userTx.RelayerAddr = tx.SndAddr + + tx.InnerTransactions = []*transaction.Transaction{userTx} + + t.Run("flag not active should error", func(t *testing.T) { + t.Parallel() + + pubKeyConverter := testscommon.NewPubkeyConverterMock(4) + acntSrc := createUserAcc(tx.SndAddr) + _ = acntSrc.AddToBalance(big.NewInt(100)) + acntDst := createUserAcc(tx.RcvAddr) + _ = acntDst.AddToBalance(big.NewInt(10)) + + acntFinal := createUserAcc(userTx.RcvAddr) + _ = acntFinal.AddToBalance(big.NewInt(10)) + + adb := &stateMock.AccountsStub{} + adb.LoadAccountCalled = func(address []byte) (vmcommon.AccountHandler, error) { + if bytes.Equal(address, tx.SndAddr) { + return acntSrc, nil + } + if bytes.Equal(address, tx.RcvAddr) { + return acntDst, nil + } + if bytes.Equal(address, userTx.RcvAddr) { + return acntFinal, nil + } + + return nil, errors.New("failure") + } + + scProcessorMock := &testscommon.SCProcessorMock{} + shardC, _ := sharding.NewMultiShardCoordinator(1, 0) + esdtTransferParser, _ := parsers.NewESDTTransferParser(marshaller) + argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), + } + txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) + + args := createArgsForTxProcessor() + args.Accounts = adb + args.ScProcessor = scProcessorMock + args.ShardCoordinator = shardC + args.TxTypeHandler = txTypeHandler + args.PubkeyConv = pubKeyConverter + args.ArgsParser = smartContract.NewArgumentParser() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + execTx, _ := txproc.NewTxProcessor(args) + + returnCode, err := execTx.ProcessTransaction(tx) + assert.Equal(t, process.ErrFailedTransaction, err) + assert.Equal(t, vmcommon.UserError, returnCode) + }) + t.Run("value on parent tx should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + txCopy.Value = big.NewInt(1) + testProcessRelayedTransactionV3(t, &txCopy, userTx.SndAddr, userTx.RcvAddr, process.ErrFailedTransaction, vmcommon.UserError) + }) + t.Run("different receiver on tx should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + txCopy.RcvAddr = userTx.SndAddr + testProcessRelayedTransactionV3(t, &txCopy, userTx.SndAddr, userTx.RcvAddr, process.ErrFailedTransaction, vmcommon.UserError) + }) + t.Run("empty relayer on inner tx should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + userTxCopy := *userTx + userTxCopy.RelayerAddr = nil + txCopy.InnerTransactions = []*transaction.Transaction{&userTxCopy} + testProcessRelayedTransactionV3(t, &txCopy, userTx.SndAddr, userTx.RcvAddr, process.ErrFailedTransaction, vmcommon.UserError) + }) + t.Run("different relayer on inner tx should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + userTxCopy := *userTx + userTxCopy.RelayerAddr = []byte("other") + txCopy.InnerTransactions = 
[]*transaction.Transaction{&userTxCopy} + testProcessRelayedTransactionV3(t, &txCopy, userTx.SndAddr, userTx.RcvAddr, process.ErrFailedTransaction, vmcommon.UserError) + }) + t.Run("different gas price on inner tx should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + txCopy.GasPrice = userTx.GasPrice + 1 + testProcessRelayedTransactionV3(t, &txCopy, userTx.SndAddr, userTx.RcvAddr, process.ErrFailedTransaction, vmcommon.UserError) + }) + t.Run("higher gas limit on inner tx should error", func(t *testing.T) { + t.Parallel() + + txCopy := *tx + txCopy.GasLimit = userTx.GasLimit - 1 + testProcessRelayedTransactionV3(t, &txCopy, userTx.SndAddr, userTx.RcvAddr, process.ErrFailedTransaction, vmcommon.UserError) + }) + t.Run("failure to add fees on destination should skip transaction and continue", func(t *testing.T) { + t.Parallel() + + providedAddrFail := []byte("fail addr") + providedInitialBalance := big.NewInt(100) + pubKeyConverter := testscommon.NewPubkeyConverterMock(4) + + accounts := map[string]state.UserAccountHandler{} + adb := &stateMock.AccountsStub{} + adb.LoadAccountCalled = func(address []byte) (vmcommon.AccountHandler, error) { + if bytes.Equal(address, providedAddrFail) { + return &stateMock.UserAccountStub{ + AddToBalanceCalled: func(value *big.Int) error { + return errors.New("won't add to balance") + }, + }, nil + } + + acnt, exists := accounts[string(address)] + if !exists { + acnt = createUserAcc(address) + accounts[string(address)] = acnt + _ = acnt.AddToBalance(providedInitialBalance) + } + + return acnt, nil + } + + scProcessorMock := &testscommon.SCProcessorMock{} + shardC, _ := sharding.NewMultiShardCoordinator(1, 0) + esdtTransferParser, _ := parsers.NewESDTTransferParser(marshaller) + argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), + } + txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) + + args := createArgsForTxProcessor() + args.Accounts = adb + args.ScProcessor = scProcessorMock + args.ShardCoordinator = shardC + args.TxTypeHandler = txTypeHandler + args.PubkeyConv = pubKeyConverter + args.ArgsParser = smartContract.NewArgumentParser() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedTransactionsV3Flag, common.FixRelayedBaseCostFlag) + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { + return big.NewInt(1) + }, + ComputeRelayedTxFeesCalled: func(tx data.TransactionWithFeeHandler) (*big.Int, *big.Int, error) { + relayerFee := big.NewInt(0).SetInt64(int64(len(tx.GetUserTransactions()))) // gasPrice = 1 + totalFee := *relayerFee + for _, innerTx := range tx.GetUserTransactions() { + totalFee.Add(&totalFee, big.NewInt(0).SetUint64(innerTx.GetGasLimit())) + } + + return relayerFee, &totalFee, nil + }, + } + args.RelayedTxV3Processor, _ = txproc.NewRelayedTxV3Processor(txproc.ArgRelayedTxV3Processor{ + EconomicsFee: args.EconomicsFee, + ShardCoordinator: args.ShardCoordinator, + MaxTransactionsAllowed: 10, + }) + logs := make([]*vmcommon.LogEntry, 0) + args.TxLogsProcessor = &mock.TxLogsProcessorStub{ + SaveLogCalled: func(txHash []byte, tx data.TransactionHandler, 
vmLogs []*vmcommon.LogEntry) error { + logs = append(logs, vmLogs...) + return nil + }, + } + execTx, _ := txproc.NewTxProcessor(args) + + txCopy := *tx + innerTx1 := &transaction.Transaction{ + Nonce: 0, + Value: big.NewInt(10), + RcvAddr: []byte("sDST"), + SndAddr: []byte("sender inner tx 1"), + GasPrice: 1, + GasLimit: 1, + RelayerAddr: txCopy.SndAddr, + } + innerTx2 := &transaction.Transaction{ + Nonce: 0, + Value: big.NewInt(10), + RcvAddr: []byte("sDST"), + SndAddr: []byte("sender inner tx 2"), + GasPrice: 1, + GasLimit: 1, + RelayerAddr: txCopy.SndAddr, + } + innerTx3 := &transaction.Transaction{ + Nonce: 0, + Value: big.NewInt(10), + RcvAddr: []byte("sDST"), + SndAddr: providedAddrFail, + GasPrice: 1, + GasLimit: 1, + RelayerAddr: txCopy.SndAddr, + } + + txCopy.InnerTransactions = []*transaction.Transaction{innerTx1, innerTx2, innerTx3} + returnCode, err := execTx.ProcessTransaction(&txCopy) + assert.NoError(t, err) + assert.Equal(t, vmcommon.Ok, returnCode) + + expectedBalance := providedInitialBalance + for _, acnt := range accounts { + switch string(acnt.AddressBytes()) { + case "sSRC": + continue // relayer + case "sDST": + expectedBalance = big.NewInt(120) // 2 successful txs received + case "sender inner tx 1", "sender inner tx 2": + expectedBalance = big.NewInt(90) // one successful tx sent from each + default: + assert.Fail(t, "should not be other participants") + } + + assert.Equal(t, expectedBalance, acnt.GetBalance(), fmt.Sprintf("checks failed for address: %s", string(acnt.AddressBytes()))) + } + + require.Equal(t, 2, len(logs)) + for _, log := range logs { + require.Equal(t, core.CompletedTxEventIdentifier, string(log.Identifier)) + } + }) + t.Run("one inner fails should return success on relayed", func(t *testing.T) { + t.Parallel() + + providedInitialBalance := big.NewInt(100) + pubKeyConverter := testscommon.NewPubkeyConverterMock(4) + + accounts := map[string]state.UserAccountHandler{} + adb := &stateMock.AccountsStub{} + adb.LoadAccountCalled = func(address []byte) (vmcommon.AccountHandler, error) { + acnt, exists := accounts[string(address)] + if !exists { + acnt = createUserAcc(address) + accounts[string(address)] = acnt + _ = acnt.AddToBalance(providedInitialBalance) + } + + return acnt, nil + } + + scProcessorMock := &testscommon.SCProcessorMock{} + shardC, _ := sharding.NewMultiShardCoordinator(1, 0) + esdtTransferParser, _ := parsers.NewESDTTransferParser(marshaller) + argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), + } + txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) + + args := createArgsForTxProcessor() + args.Accounts = adb + args.ScProcessor = scProcessorMock + args.ShardCoordinator = shardC + args.TxTypeHandler = txTypeHandler + args.PubkeyConv = pubKeyConverter + args.ArgsParser = smartContract.NewArgumentParser() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedTransactionsV3Flag, common.FixRelayedBaseCostFlag) + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { + return big.NewInt(int64(tx.GetGasPrice() * tx.GetGasLimit())) + }, + ComputeRelayedTxFeesCalled: 
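// The economics stub used by these subtests prices the relayer part at one
// unit of move-balance gas per inner transaction (gas price 1) and adds every
// inner gas limit on top for the total fee. The same arithmetic, standalone:
package main

import (
	"fmt"
	"math/big"
)

func stubRelayedFeesSketch(innerGasLimits []uint64) (relayerFee, totalFee *big.Int) {
	relayerFee = big.NewInt(int64(len(innerGasLimits))) // gasPrice = 1
	totalFee = new(big.Int).Set(relayerFee)
	for _, gasLimit := range innerGasLimits {
		totalFee.Add(totalFee, new(big.Int).SetUint64(gasLimit))
	}
	return relayerFee, totalFee
}

func main() {
	relayerFee, totalFee := stubRelayedFeesSketch([]uint64{1, 1, 1})
	fmt.Println(relayerFee, totalFee) // 3 6
}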
func(tx data.TransactionWithFeeHandler) (*big.Int, *big.Int, error) { + relayerFee := big.NewInt(0).SetInt64(int64(len(tx.GetUserTransactions()))) // gasPrice = 1 + totalFee := *relayerFee + for _, innerTx := range tx.GetUserTransactions() { + totalFee.Add(&totalFee, big.NewInt(0).SetUint64(innerTx.GetGasLimit())) + } + + return relayerFee, &totalFee, nil + }, + } + args.RelayedTxV3Processor, _ = txproc.NewRelayedTxV3Processor(txproc.ArgRelayedTxV3Processor{ + EconomicsFee: args.EconomicsFee, + ShardCoordinator: args.ShardCoordinator, + MaxTransactionsAllowed: 10, + }) + wasGetLogsCalled := false + wasRemoveCalled := false + args.FailedTxLogsAccumulator = &processMocks.FailedTxLogsAccumulatorMock{ + GetLogsCalled: func(txHash []byte) (data.TransactionHandler, []*vmcommon.LogEntry, bool) { + wasGetLogsCalled = true + + return &smartContractResult.SmartContractResult{}, []*vmcommon.LogEntry{}, true + }, + RemoveCalled: func(txHash []byte) { + wasRemoveCalled = true + }, + } + execTx, _ := txproc.NewTxProcessor(args) + + txCopy := *tx + usertTxCopy := *userTx // same inner tx twice should fail second time + txCopy.InnerTransactions = append(txCopy.InnerTransactions, &usertTxCopy) + returnCode, err := execTx.ProcessTransaction(&txCopy) + assert.NoError(t, err) + assert.Equal(t, vmcommon.Ok, returnCode) + assert.True(t, wasGetLogsCalled) + assert.True(t, wasRemoveCalled) + }) + t.Run("fees consumed mismatch should error", func(t *testing.T) { + t.Parallel() + + providedInitialBalance := big.NewInt(100) + pubKeyConverter := testscommon.NewPubkeyConverterMock(4) + + accounts := map[string]state.UserAccountHandler{} + adb := &stateMock.AccountsStub{} + adb.LoadAccountCalled = func(address []byte) (vmcommon.AccountHandler, error) { + acnt, exists := accounts[string(address)] + if !exists { + acnt = createUserAcc(address) + accounts[string(address)] = acnt + _ = acnt.AddToBalance(providedInitialBalance) + } + + return acnt, nil + } + wasRevertToSnapshotCalled := false + adb.RevertToSnapshotCalled = func(snapshot int) error { + wasRevertToSnapshotCalled = true + return nil + } + + scProcessorMock := &testscommon.SCProcessorMock{} + shardC, _ := sharding.NewMultiShardCoordinator(1, 0) + esdtTransferParser, _ := parsers.NewESDTTransferParser(marshaller) + argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), + } + txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) + + args := createArgsForTxProcessor() + args.Accounts = adb + args.ScProcessor = scProcessorMock + args.ShardCoordinator = shardC + args.TxTypeHandler = txTypeHandler + args.PubkeyConv = pubKeyConverter + args.ArgsParser = smartContract.NewArgumentParser() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedTransactionsV3Flag, common.FixRelayedBaseCostFlag) + increasingFee := big.NewInt(0) + args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ + ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { + increasingFee.Add(increasingFee, big.NewInt(1)) + return increasingFee + }, + } + args.RelayedTxV3Processor, _ = txproc.NewRelayedTxV3Processor(txproc.ArgRelayedTxV3Processor{ + EconomicsFee: args.EconomicsFee, + 
ShardCoordinator: args.ShardCoordinator, + MaxTransactionsAllowed: 10, + }) + execTx, _ := txproc.NewTxProcessor(args) + + txCopy := *tx + innerTx1 := &transaction.Transaction{ + Nonce: 0, + Value: big.NewInt(10), + RcvAddr: []byte("sDST"), + SndAddr: []byte("sender inner tx 1"), + GasPrice: 1, + GasLimit: 1, + RelayerAddr: txCopy.SndAddr, + } + innerTx2 := &transaction.Transaction{ + Nonce: 0, + Value: big.NewInt(10), + RcvAddr: []byte("sDST"), + SndAddr: []byte("sender inner tx 2"), + GasPrice: 1, + GasLimit: 1, + RelayerAddr: txCopy.SndAddr, + } + + txCopy.InnerTransactions = []*transaction.Transaction{innerTx1, innerTx2} + returnCode, err := execTx.ProcessTransaction(&txCopy) + assert.Error(t, err) + assert.Equal(t, vmcommon.UserError, returnCode) + assert.True(t, wasRevertToSnapshotCalled) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + testProcessRelayedTransactionV3(t, tx, userTx.SndAddr, userTx.RcvAddr, nil, vmcommon.Ok) + }) +} + +func testProcessRelayedTransactionV3( + t *testing.T, + tx *transaction.Transaction, + innerSender []byte, + finalRcvr []byte, + expectedErr error, + expectedCode vmcommon.ReturnCode, +) { + pubKeyConverter := testscommon.NewPubkeyConverterMock(4) + marshaller := &mock.MarshalizerMock{} + + acntSrc := createUserAcc(tx.SndAddr) + _ = acntSrc.AddToBalance(big.NewInt(100)) + acntDst := createUserAcc(tx.RcvAddr) + _ = acntDst.AddToBalance(big.NewInt(10)) + + acntFinal := createUserAcc(finalRcvr) + _ = acntFinal.AddToBalance(big.NewInt(10)) + acntInnerSender := createUserAcc(innerSender) + _ = acntInnerSender.AddToBalance(big.NewInt(10)) + + adb := &stateMock.AccountsStub{} + adb.LoadAccountCalled = func(address []byte) (vmcommon.AccountHandler, error) { + if bytes.Equal(address, tx.SndAddr) { + return acntSrc, nil + } + if bytes.Equal(address, tx.RcvAddr) { + return acntDst, nil + } + if bytes.Equal(address, finalRcvr) { + return acntFinal, nil + } + if bytes.Equal(address, innerSender) { + return acntInnerSender, nil + } + + return nil, errors.New("failure") + } + + scProcessorMock := &testscommon.SCProcessorMock{} + shardC, _ := sharding.NewMultiShardCoordinator(1, 0) + esdtTransferParser, _ := parsers.NewESDTTransferParser(marshaller) + argTxTypeHandler := coordinator.ArgNewTxTypeHandler{ + PubkeyConverter: pubKeyConverter, + ShardCoordinator: shardC, + BuiltInFunctions: builtInFunctions.NewBuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.ESDTMetadataContinuousCleanupFlag), + } + txTypeHandler, _ := coordinator.NewTxTypeHandler(argTxTypeHandler) + + args := createArgsForTxProcessor() + args.Accounts = adb + args.ScProcessor = scProcessorMock + args.ShardCoordinator = shardC + args.TxTypeHandler = txTypeHandler + args.PubkeyConv = pubKeyConverter + args.ArgsParser = smartContract.NewArgumentParser() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedTransactionsV3Flag, common.FixRelayedBaseCostFlag) + args.EconomicsFee = &economicsmocks.EconomicsHandlerMock{ + ComputeTxFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { + return big.NewInt(4) + }, + ComputeMoveBalanceFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { + return big.NewInt(4) + }, + ComputeGasLimitCalled: func(tx data.TransactionWithFeeHandler) uint64 { + return 4 + }, + ComputeRelayedTxFeesCalled: func(tx data.TransactionWithFeeHandler) (*big.Int, *big.Int, error) 
{ + relayerFee := big.NewInt(0).SetInt64(int64(len(tx.GetUserTransactions()))) // gasPrice = 1 + totalFee := *relayerFee + for _, innerTx := range tx.GetUserTransactions() { + totalFee.Add(&totalFee, big.NewInt(0).SetUint64(innerTx.GetGasLimit())) + } + + return relayerFee, &totalFee, nil + }, + } + args.RelayedTxV3Processor, _ = txproc.NewRelayedTxV3Processor(txproc.ArgRelayedTxV3Processor{ + EconomicsFee: args.EconomicsFee, + ShardCoordinator: args.ShardCoordinator, + MaxTransactionsAllowed: 10, + }) + + execTx, _ := txproc.NewTxProcessor(args) + + returnCode, err := execTx.ProcessTransaction(tx) + assert.Equal(t, expectedErr, err) + assert.Equal(t, expectedCode, returnCode) +} + func TestTxProcessor_ProcessRelayedTransaction(t *testing.T) { t.Parallel() @@ -2126,7 +2702,7 @@ func TestTxProcessor_ProcessRelayedTransactionArgsParserErrorShouldError(t *test parseError := errors.New("parse error") args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return "", nil, parseError }} @@ -2189,7 +2765,7 @@ func TestTxProcessor_ProcessRelayedTransactionMultipleArgumentsShouldError(t *te tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{[]byte("0"), []byte("1")}, nil }} @@ -2252,7 +2828,7 @@ func TestTxProcessor_ProcessRelayedTransactionFailUnMarshalInnerShouldError(t *t tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{[]byte("0")}, nil }} @@ -2315,7 +2891,7 @@ func TestTxProcessor_ProcessRelayedTransactionDifferentSenderInInnerTxThanReceiv tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{userTxMarshalled}, nil }} @@ -2378,7 +2954,7 @@ func TestTxProcessor_ProcessRelayedTransactionSmallerValueInnerTxShouldError(t * tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{userTxMarshalled}, nil }} @@ -2441,7 +3017,7 @@ func TestTxProcessor_ProcessRelayedTransactionGasPriceMismatchShouldError(t *tes tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{userTxMarshalled}, nil }} @@ -2504,7 +3080,7 @@ func TestTxProcessor_ProcessRelayedTransactionGasLimitMismatchShouldError(t *tes tx.Data = 
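// Unlike v3, the v1 relayed transactions exercised by the following tests
// embed the whole user transaction in the data field as
// "<relayed marker>@<hex of the marshalled user tx>". A standalone sketch of
// that encoding, with encoding/json standing in for the node's marshaller and
// the marker literal assumed to match core.RelayedTransaction:
package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
)

const relayedMarker = "relayedTx" // assumed literal of core.RelayedTransaction

type userTxSketch struct {
	Nonce   uint64 `json:"nonce"`
	SndAddr string `json:"snd"`
	RcvAddr string `json:"rcv"`
	Value   int64  `json:"value"`
}

func buildRelayedV1Data(userTx userTxSketch) ([]byte, error) {
	marshalled, err := json.Marshal(userTx)
	if err != nil {
		return nil, err
	}
	return []byte(relayedMarker + "@" + hex.EncodeToString(marshalled)), nil
}

func main() {
	data, _ := buildRelayedV1Data(userTxSketch{Nonce: 0, SndAddr: "user", RcvAddr: "dest", Value: 100})
	fmt.Println(string(data)[:20]) // relayedTx@7b226e6f6e
}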
[]byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{userTxMarshalled}, nil }} @@ -2612,7 +3188,7 @@ func TestTxProcessor_ProcessRelayedTransactionDisabled(t *testing.T) { args.ArgsParser = smartContract.NewArgumentParser() called := false args.BadTxForwarder = &mock.IntermediateTransactionHandlerMock{ - AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler, key []byte) error { called = true return nil }, @@ -2629,13 +3205,14 @@ func TestTxProcessor_ConsumeMoveBalanceWithUserTx(t *testing.T) { t.Parallel() args := createArgsForTxProcessor() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() args.EconomicsFee = &economicsmocks.EconomicsHandlerStub{ - ComputeFeeForProcessingCalled: func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int { - return big.NewInt(1) - }, ComputeTxFeeCalled: func(tx data.TransactionWithFeeHandler) *big.Int { return big.NewInt(150) }, + ComputeFeeForProcessingCalled: func(tx data.TransactionWithFeeHandler, gasToUse uint64) *big.Int { + return big.NewInt(1) + }, } args.TxFeeHandler = &mock.FeeAccumulatorStub{ ProcessTransactionFeeCalled: func(cost *big.Int, devFee *big.Int, hash []byte) { @@ -2657,7 +3234,7 @@ func TestTxProcessor_ConsumeMoveBalanceWithUserTx(t *testing.T) { err := execTx.ProcessMoveBalanceCostRelayedUserTx(userTx, &smartContractResult.SmartContractResult{}, acntSrc, originalTxHash) assert.Nil(t, err) - assert.Equal(t, acntSrc.GetBalance(), big.NewInt(99)) + assert.Equal(t, big.NewInt(99), acntSrc.GetBalance()) } func TestTxProcessor_IsCrossTxFromMeShouldWork(t *testing.T) { @@ -2699,7 +3276,7 @@ func TestTxProcessor_ProcessUserTxOfTypeRelayedShouldError(t *testing.T) { tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{userTxMarshalled}, nil }} @@ -2732,7 +3309,6 @@ func TestTxProcessor_ProcessUserTxOfTypeRelayedShouldError(t *testing.T) { execTx, _ := txproc.NewTxProcessor(args) - txHash, _ := core.CalculateHash(args.Marshalizer, args.Hasher, tx) returnCode, err := execTx.ProcessUserTx(&tx, &userTx, tx.Value, tx.Nonce, txHash) assert.Nil(t, err) assert.Equal(t, vmcommon.UserError, returnCode) @@ -2763,7 +3339,7 @@ func TestTxProcessor_ProcessUserTxOfTypeMoveBalanceShouldWork(t *testing.T) { tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{userTxMarshalled}, nil }} @@ -2796,7 +3372,6 @@ func TestTxProcessor_ProcessUserTxOfTypeMoveBalanceShouldWork(t *testing.T) { execTx, _ := txproc.NewTxProcessor(args) - txHash, _ := core.CalculateHash(args.Marshalizer, args.Hasher, tx) returnCode, err := execTx.ProcessUserTx(&tx, &userTx, tx.Value, tx.Nonce, txHash) assert.Nil(t, err) assert.Equal(t, vmcommon.Ok, 
returnCode) @@ -2827,7 +3402,7 @@ func TestTxProcessor_ProcessUserTxOfTypeSCDeploymentShouldWork(t *testing.T) { tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{userTxMarshalled}, nil }} @@ -2860,7 +3435,6 @@ func TestTxProcessor_ProcessUserTxOfTypeSCDeploymentShouldWork(t *testing.T) { execTx, _ := txproc.NewTxProcessor(args) - txHash, _ := core.CalculateHash(args.Marshalizer, args.Hasher, tx) returnCode, err := execTx.ProcessUserTx(&tx, &userTx, tx.Value, tx.Nonce, txHash) assert.Nil(t, err) assert.Equal(t, vmcommon.Ok, returnCode) @@ -2891,7 +3465,7 @@ func TestTxProcessor_ProcessUserTxOfTypeSCInvokingShouldWork(t *testing.T) { tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{userTxMarshalled}, nil }} @@ -2924,7 +3498,6 @@ func TestTxProcessor_ProcessUserTxOfTypeSCInvokingShouldWork(t *testing.T) { execTx, _ := txproc.NewTxProcessor(args) - txHash, _ := core.CalculateHash(args.Marshalizer, args.Hasher, tx) returnCode, err := execTx.ProcessUserTx(&tx, &userTx, tx.Value, tx.Nonce, txHash) assert.Nil(t, err) assert.Equal(t, vmcommon.Ok, returnCode) @@ -2955,7 +3528,7 @@ func TestTxProcessor_ProcessUserTxOfTypeBuiltInFunctionCallShouldWork(t *testing tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{userTxMarshalled}, nil }} @@ -2988,7 +3561,6 @@ func TestTxProcessor_ProcessUserTxOfTypeBuiltInFunctionCallShouldWork(t *testing execTx, _ := txproc.NewTxProcessor(args) - txHash, _ := core.CalculateHash(args.Marshalizer, args.Hasher, tx) returnCode, err := execTx.ProcessUserTx(&tx, &userTx, tx.Value, tx.Nonce, txHash) assert.Nil(t, err) assert.Equal(t, vmcommon.Ok, returnCode) @@ -3019,7 +3591,7 @@ func TestTxProcessor_ProcessUserTxErrNotPayableShouldFailRelayTx(t *testing.T) { tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = &testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{userTxMarshalled}, nil }} @@ -3056,7 +3628,6 @@ func TestTxProcessor_ProcessUserTxErrNotPayableShouldFailRelayTx(t *testing.T) { execTx, _ := txproc.NewTxProcessor(args) - txHash, _ := core.CalculateHash(args.Marshalizer, args.Hasher, tx) returnCode, err := execTx.ProcessUserTx(&tx, &userTx, tx.Value, tx.Nonce, txHash) assert.Nil(t, err) assert.Equal(t, vmcommon.UserError, returnCode) @@ -3087,7 +3658,7 @@ func TestTxProcessor_ProcessUserTxFailedBuiltInFunctionCall(t *testing.T) { tx.Data = []byte(core.RelayedTransaction + "@" + hex.EncodeToString(userTxMarshalled)) args := createArgsForTxProcessor() - args.ArgsParser = &mock.ArgumentParserMock{ + args.ArgsParser = 
&testscommon.ArgumentParserMock{ ParseCallDataCalled: func(data string) (string, [][]byte, error) { return core.RelayedTransaction, [][]byte{userTxMarshalled}, nil }} @@ -3126,7 +3697,6 @@ func TestTxProcessor_ProcessUserTxFailedBuiltInFunctionCall(t *testing.T) { execTx, _ := txproc.NewTxProcessor(args) - txHash, _ := core.CalculateHash(args.Marshalizer, args.Hasher, tx) returnCode, err := execTx.ProcessUserTx(&tx, &userTx, tx.Value, tx.Nonce, txHash) assert.Nil(t, err) assert.Equal(t, vmcommon.ExecutionFailed, returnCode) @@ -3212,7 +3782,7 @@ func TestTxProcessor_shouldIncreaseNonce(t *testing.T) { t.Run("fix not enabled, should return true", func(t *testing.T) { args := createArgsForTxProcessor() - args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.RelayedNonceFixFlag) txProc, _ := txproc.NewTxProcessor(args) assert.True(t, txProc.ShouldIncreaseNonce(nil)) @@ -3276,12 +3846,12 @@ func TestTxProcessor_AddNonExecutableLog(t *testing.T) { originalTxHash, err := core.CalculateHash(args.Marshalizer, args.Hasher, originalTx) assert.Nil(t, err) numLogsSaved := 0 - args.TxLogsProcessor = &mock.TxLogsProcessorStub{ - SaveLogCalled: func(txHash []byte, tx data.TransactionHandler, vmLogs []*vmcommon.LogEntry) error { + args.FailedTxLogsAccumulator = &processMocks.FailedTxLogsAccumulatorMock{ + SaveLogsCalled: func(txHash []byte, tx data.TransactionHandler, logs []*vmcommon.LogEntry) error { assert.Equal(t, originalTxHash, txHash) assert.Equal(t, originalTx, tx) - assert.Equal(t, 1, len(vmLogs)) - firstLog := vmLogs[0] + assert.Equal(t, 1, len(logs)) + firstLog := logs[0] assert.Equal(t, core.SignalErrorOperation, string(firstLog.Identifier)) assert.Equal(t, sender, firstLog.Address) assert.Empty(t, firstLog.Data) @@ -3305,3 +3875,15 @@ func TestTxProcessor_AddNonExecutableLog(t *testing.T) { assert.Equal(t, 3, numLogsSaved) }) } + +func TestTxProcessor_IsInterfaceNil(t *testing.T) { + t.Parallel() + + args := createArgsForTxProcessor() + args.RelayedTxV3Processor = nil + proc, _ := txproc.NewTxProcessor(args) + require.True(t, proc.IsInterfaceNil()) + + proc, _ = txproc.NewTxProcessor(createArgsForTxProcessor()) + require.False(t, proc.IsInterfaceNil()) +} diff --git a/process/transactionEvaluator/simulationAccountsDB.go b/process/transactionEvaluator/simulationAccountsDB.go index ffac23e7994..b3a5cbecb0d 100644 --- a/process/transactionEvaluator/simulationAccountsDB.go +++ b/process/transactionEvaluator/simulationAccountsDB.go @@ -121,12 +121,7 @@ func (r *simulationAccountsDB) RootHash() ([]byte, error) { } // RecreateTrie won't do anything as write operations are disabled on this component -func (r *simulationAccountsDB) RecreateTrie(_ []byte) error { - return nil -} - -// RecreateTrieFromEpoch won't do anything as write operations are disabled on this component -func (r *simulationAccountsDB) RecreateTrieFromEpoch(_ common.RootHashHolder) error { +func (r *simulationAccountsDB) RecreateTrie(_ common.RootHashHolder) error { return nil } diff --git a/process/transactionEvaluator/simulationAccountsDB_test.go b/process/transactionEvaluator/simulationAccountsDB_test.go index cf651e06444..13655ba315f 100644 --- a/process/transactionEvaluator/simulationAccountsDB_test.go +++ b/process/transactionEvaluator/simulationAccountsDB_test.go @@ -52,7 +52,7 @@ func TestReadOnlyAccountsDB_WriteOperationsShouldNotCalled(t *testing.T) { t.Errorf(failErrMsg) return nil }, - RecreateTrieCalled: 
func(_ []byte) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { t.Errorf(failErrMsg) return nil }, diff --git a/process/transactionEvaluator/transactionEvaluator.go b/process/transactionEvaluator/transactionEvaluator.go index b9184ae3fad..032aeefdc80 100644 --- a/process/transactionEvaluator/transactionEvaluator.go +++ b/process/transactionEvaluator/transactionEvaluator.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/facade" @@ -32,6 +33,7 @@ type ArgsApiTransactionEvaluator struct { Accounts state.AccountsAdapterWithClean ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + BlockChain data.ChainHandler } type apiTransactionEvaluator struct { @@ -41,6 +43,7 @@ type apiTransactionEvaluator struct { feeHandler process.FeeHandler txSimulator facade.TransactionSimulatorProcessor enableEpochsHandler common.EnableEpochsHandler + blockChain data.ChainHandler mutExecution sync.RWMutex } @@ -64,6 +67,9 @@ func NewAPITransactionEvaluator(args ArgsApiTransactionEvaluator) (*apiTransacti if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + if check.IfNil(args.BlockChain) { + return nil, process.ErrNilBlockChain + } err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.CleanUpInformativeSCRsFlag, }) @@ -78,6 +84,7 @@ func NewAPITransactionEvaluator(args ArgsApiTransactionEvaluator) (*apiTransacti accounts: args.Accounts, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, + blockChain: args.BlockChain, } return tce, nil @@ -91,7 +98,9 @@ func (ate *apiTransactionEvaluator) SimulateTransactionExecution(tx *transaction ate.mutExecution.Unlock() }() - return ate.txSimulator.ProcessTx(tx) + currentHeader := ate.getCurrentBlockHeader() + + return ate.txSimulator.ProcessTx(tx, currentHeader) } // ComputeTransactionGasLimit will calculate how many gas units a transaction will consume @@ -110,7 +119,7 @@ func (ate *apiTransactionEvaluator) ComputeTransactionGasLimit(tx *transaction.T switch txTypeOnSender { case process.SCDeployment, process.SCInvoking, process.BuiltInFunctionCall, process.MoveBalance: return ate.simulateTransactionCost(tx, txTypeOnSender) - case process.RelayedTx, process.RelayedTxV2: + case process.RelayedTx, process.RelayedTxV2, process.RelayedTxV3: // TODO implement in the next PR return &transaction.CostResponse{ GasUnits: 0, @@ -140,8 +149,8 @@ func (ate *apiTransactionEvaluator) simulateTransactionCost(tx *transaction.Tran } costResponse := &transaction.CostResponse{} - - res, err := ate.txSimulator.ProcessTx(tx) + currentHeader := ate.getCurrentBlockHeader() + res, err := ate.txSimulator.ProcessTx(tx, currentHeader) if err != nil { costResponse.ReturnMessage = err.Error() return costResponse, nil @@ -228,6 +237,15 @@ func (ate *apiTransactionEvaluator) addMissingFieldsIfNeeded(tx *transaction.Tra return nil } +func (ate *apiTransactionEvaluator) getCurrentBlockHeader() data.HeaderHandler { + currentHeader := ate.blockChain.GetCurrentBlockHeader() + if check.IfNil(currentHeader) { + return ate.blockChain.GetGenesisHeader() + } + + return currentHeader +} + func 
(ate *apiTransactionEvaluator) getTxGasLimit(tx *transaction.Transaction) (uint64, error) { selfShardID := ate.shardCoordinator.SelfId() maxGasLimitPerBlock := ate.feeHandler.MaxGasLimitPerBlock(selfShardID) - 1 diff --git a/process/transactionEvaluator/transactionEvaluator_test.go b/process/transactionEvaluator/transactionEvaluator_test.go index 586072856ac..f36a5388777 100644 --- a/process/transactionEvaluator/transactionEvaluator_test.go +++ b/process/transactionEvaluator/transactionEvaluator_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" @@ -30,6 +31,7 @@ func createArgs() ArgsApiTransactionEvaluator { Accounts: &stateMock.AccountsStub{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + BlockChain: &testscommon.ChainHandlerMock{}, } } @@ -43,6 +45,16 @@ func TestTransactionEvaluator_NilTxTypeHandler(t *testing.T) { require.Equal(t, process.ErrNilTxTypeHandler, err) } +func TestTransactionEvaluator_NilBlockChain(t *testing.T) { + t.Parallel() + args := createArgs() + args.BlockChain = nil + tce, err := NewAPITransactionEvaluator(args) + + require.Nil(t, tce) + require.Equal(t, process.ErrNilBlockChain, err) +} + func TestTransactionEvaluator_NilFeeHandlerShouldErr(t *testing.T) { t.Parallel() @@ -115,7 +127,7 @@ func TestComputeTransactionGasLimit_MoveBalance(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{}, nil }, } @@ -154,7 +166,7 @@ func TestComputeTransactionGasLimit_MoveBalanceInvalidNonceShouldStillComputeCos }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return nil, simulationErr }, } @@ -185,7 +197,7 @@ func TestComputeTransactionGasLimit_BuiltInFunction(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{ VMOutput: &vmcommon.VMOutput{ ReturnCode: vmcommon.Ok, @@ -221,7 +233,7 @@ func TestComputeTransactionGasLimit_BuiltInFunctionShouldErr(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return nil, localErr }, } @@ -251,7 +263,7 
@@ func TestComputeTransactionGasLimit_NilVMOutput(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{}, nil }, } @@ -260,7 +272,8 @@ func TestComputeTransactionGasLimit_NilVMOutput(t *testing.T) { return &stateMock.UserAccountStub{Balance: big.NewInt(100000)}, nil }, } - tce, _ := NewAPITransactionEvaluator(args) + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) tx := &transaction.Transaction{} cost, err := tce.ComputeTransactionGasLimit(tx) @@ -281,7 +294,7 @@ func TestComputeTransactionGasLimit_RetCodeNotOk(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, _ data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{ VMOutput: &vmcommon.VMOutput{ ReturnCode: vmcommon.UserError, @@ -335,3 +348,82 @@ func TestExtractGasUsedFromMessage(t *testing.T) { require.Equal(t, uint64(0), extractGasRemainedFromMessage("", gasRemainedSplitString)) require.Equal(t, uint64(0), extractGasRemainedFromMessage("too much gas provided, gas needed = 10000, gas used = wrong", gasUsedSlitString)) } + +func TestApiTransactionEvaluator_SimulateTransactionExecution(t *testing.T) { + t.Parallel() + + called := false + expectedNonce := uint64(1000) + args := createArgs() + args.BlockChain = &testscommon.ChainHandlerMock{} + _ = args.BlockChain.SetCurrentBlockHeaderAndRootHash(&block.Header{Nonce: expectedNonce}, []byte("test")) + + args.TxSimulator = &mock.TransactionSimulatorStub{ + ProcessTxCalled: func(_ *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { + called = true + require.Equal(t, expectedNonce, currentHeader.GetNonce()) + return nil, nil + }, + } + + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) + + tx := &transaction.Transaction{} + + _, err = tce.SimulateTransactionExecution(tx) + require.Nil(t, err) + require.True(t, called) +} + +func TestApiTransactionEvaluator_ComputeTransactionGasLimit(t *testing.T) { + t.Parallel() + + called := false + expectedNonce := uint64(1000) + args := createArgs() + args.BlockChain = &testscommon.ChainHandlerMock{} + _ = args.BlockChain.SetCurrentBlockHeaderAndRootHash(&block.Header{Nonce: expectedNonce}, []byte("test")) + + args.TxTypeHandler = &testscommon.TxTypeHandlerMock{ + ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { + return process.SCInvoking, process.SCInvoking + }, + } + args.TxSimulator = &mock.TransactionSimulatorStub{ + ProcessTxCalled: func(_ *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { + called = true + require.Equal(t, expectedNonce, currentHeader.GetNonce()) + return &txSimData.SimulationResultsWithVMOutput{}, nil + }, + } + + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) + + tx := &transaction.Transaction{} + + _, err = tce.ComputeTransactionGasLimit(tx) + require.Nil(t, err) + require.True(t, called) +} + +func 
TestApiTransactionEvaluator_GetCurrentHeader(t *testing.T) { + t.Parallel() + + args := createArgs() + args.BlockChain = &testscommon.ChainHandlerMock{} + _ = args.BlockChain.SetGenesisHeader(&block.Header{Nonce: 0}) + + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) + + currentHeader := tce.getCurrentBlockHeader() + require.Equal(t, uint64(0), currentHeader.GetNonce()) + + expectedNonce := uint64(100) + _ = args.BlockChain.SetCurrentBlockHeaderAndRootHash(&block.Header{Nonce: expectedNonce}, []byte("root")) + + currentHeader = tce.getCurrentBlockHeader() + require.Equal(t, expectedNonce, currentHeader.GetNonce()) +} diff --git a/process/transactionEvaluator/transactionSimulator.go b/process/transactionEvaluator/transactionSimulator.go index 8d1a405643d..c87e79b0472 100644 --- a/process/transactionEvaluator/transactionSimulator.go +++ b/process/transactionEvaluator/transactionSimulator.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/receipt" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" @@ -33,6 +34,7 @@ type ArgsTxSimulator struct { Hasher hashing.Hasher Marshalizer marshal.Marshalizer DataFieldParser DataFieldParser + BlockChainHook process.BlockChainHookHandler } type refundHandler interface { @@ -50,6 +52,7 @@ type transactionSimulator struct { marshalizer marshal.Marshalizer refundDetector refundHandler dataFieldParser DataFieldParser + blockChainHook process.BlockChainHookHandler } // NewTransactionSimulator returns a new instance of a transactionSimulator @@ -78,6 +81,9 @@ func NewTransactionSimulator(args ArgsTxSimulator) (*transactionSimulator, error if check.IfNilReflect(args.DataFieldParser) { return nil, ErrNilDataFieldParser } + if check.IfNil(args.BlockChainHook) { + return nil, process.ErrNilBlockChainHook + } return &transactionSimulator{ txProcessor: args.TransactionProcessor, @@ -89,17 +95,20 @@ func NewTransactionSimulator(args ArgsTxSimulator) (*transactionSimulator, error hasher: args.Hasher, refundDetector: transactionAPI.NewRefundDetector(), dataFieldParser: args.DataFieldParser, + blockChainHook: args.BlockChainHook, }, nil } // ProcessTx will process the transaction in a special environment, where state-writing is not allowed -func (ts *transactionSimulator) ProcessTx(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { +func (ts *transactionSimulator) ProcessTx(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { ts.mutOperation.Lock() defer ts.mutOperation.Unlock() txStatus := transaction.TxStatusPending failReason := "" + ts.blockChainHook.SetCurrentHeader(currentHeader) + retCode, err := ts.txProcessor.ProcessTransaction(tx) if err != nil { failReason = err.Error() diff --git a/process/transactionEvaluator/transactionSimulator_test.go b/process/transactionEvaluator/transactionSimulator_test.go index 727f158c7eb..94da76f4254 100644 --- a/process/transactionEvaluator/transactionSimulator_test.go +++ b/process/transactionEvaluator/transactionSimulator_test.go @@ -76,6 +76,15 @@ func TestNewTransactionSimulator(t *testing.T) { }, exError: ErrNilHasher, }, + { + name: "NilBlockChainHook", + argsFunc: func() 
ArgsTxSimulator { + args := getTxSimulatorArgs() + args.BlockChainHook = nil + return args + }, + exError: process.ErrNilBlockChainHook, + }, { name: "NilMarshalizer", argsFunc: func() ArgsTxSimulator { @@ -125,7 +134,7 @@ func TestTransactionSimulator_ProcessTxProcessingErrShouldSignal(t *testing.T) { } ts, _ := NewTransactionSimulator(args) - results, err := ts.ProcessTx(&transaction.Transaction{Nonce: 37}) + results, err := ts.ProcessTx(&transaction.Transaction{Nonce: 37}, &block.Header{}) require.NoError(t, err) require.Equal(t, expErr.Error(), results.FailReason) } @@ -207,7 +216,7 @@ func TestTransactionSimulator_ProcessTxShouldIncludeScrsAndReceipts(t *testing.T txHash, _ := core.CalculateHash(args.Marshalizer, args.Hasher, tx) args.VMOutputCacher.Put(txHash, &vmcommon.VMOutput{}, 0) - results, err := ts.ProcessTx(tx) + results, err := ts.ProcessTx(tx, &block.Header{}) require.NoError(t, err) require.Equal( t, @@ -236,6 +245,7 @@ func getTxSimulatorArgs() ArgsTxSimulator { Marshalizer: &mock.MarshalizerMock{}, Hasher: &hashingMocks.HasherMock{}, DataFieldParser: dataFieldParser, + BlockChainHook: &testscommon.BlockChainHookStub{}, } } @@ -261,7 +271,7 @@ func TestTransactionSimulator_ProcessTxConcurrentCalls(t *testing.T) { for i := 0; i < numCalls; i++ { go func(idx int) { time.Sleep(time.Millisecond * 10) - _, _ = txSimulator.ProcessTx(tx) + _, _ = txSimulator.ProcessTx(tx, &block.Header{}) wg.Done() }(i) } diff --git a/process/transactionLog/failedTxLogsAccumulator.go b/process/transactionLog/failedTxLogsAccumulator.go new file mode 100644 index 00000000000..a0d973541bc --- /dev/null +++ b/process/transactionLog/failedTxLogsAccumulator.go @@ -0,0 +1,109 @@ +package transactionLog + +import ( + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/process" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +type logData struct { + tx data.TransactionHandler + logs []*vmcommon.LogEntry +} + +type failedTxLogsAccumulator struct { + mut sync.RWMutex + logsMap map[string]*logData +} + +// NewFailedTxLogsAccumulator returns a new instance of failedTxLogsAccumulator +func NewFailedTxLogsAccumulator() *failedTxLogsAccumulator { + return &failedTxLogsAccumulator{ + logsMap: make(map[string]*logData), + } +} + +// GetLogs returns the accumulated logs for the provided txHash +func (accumulator *failedTxLogsAccumulator) GetLogs(txHash []byte) (data.TransactionHandler, []*vmcommon.LogEntry, bool) { + if len(txHash) == 0 { + return nil, nil, false + } + + logsData, found := accumulator.getLogDataCopy(txHash) + + if !found { + return nil, nil, found + } + + return logsData.tx, logsData.logs, found +} + +func (accumulator *failedTxLogsAccumulator) getLogDataCopy(txHash []byte) (logData, bool) { + accumulator.mut.RLock() + defer accumulator.mut.RUnlock() + + logsData, found := accumulator.logsMap[string(txHash)] + if !found { + return logData{}, found + } + + logsDataCopy := logData{ + tx: logsData.tx, + } + + logsDataCopy.logs = append(logsDataCopy.logs, logsData.logs...) 
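+ // a copy of the logs slice is returned so callers cannot mutate the accumulator's internal state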
+ + return logsDataCopy, found +} + +// SaveLogs saves the logs into the internal map +func (accumulator *failedTxLogsAccumulator) SaveLogs(txHash []byte, tx data.TransactionHandler, logs []*vmcommon.LogEntry) error { + if len(txHash) == 0 { + return process.ErrNilTxHash + } + + if check.IfNil(tx) { + return process.ErrNilTransaction + } + + if len(logs) == 0 { + return nil + } + + accumulator.mut.Lock() + defer accumulator.mut.Unlock() + + _, found := accumulator.logsMap[string(txHash)] + if !found { + accumulator.logsMap[string(txHash)] = &logData{ + tx: tx, + logs: logs, + } + + return nil + } + + accumulator.logsMap[string(txHash)].logs = append(accumulator.logsMap[string(txHash)].logs, logs...) + + return nil +} + +// Remove removes the accumulated logs for the provided txHash +func (accumulator *failedTxLogsAccumulator) Remove(txHash []byte) { + if len(txHash) == 0 { + return + } + + accumulator.mut.Lock() + defer accumulator.mut.Unlock() + + delete(accumulator.logsMap, string(txHash)) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (accumulator *failedTxLogsAccumulator) IsInterfaceNil() bool { + return accumulator == nil +} diff --git a/process/transactionLog/failedTxLogsAccumulator_test.go b/process/transactionLog/failedTxLogsAccumulator_test.go new file mode 100644 index 00000000000..691f4b41ffa --- /dev/null +++ b/process/transactionLog/failedTxLogsAccumulator_test.go @@ -0,0 +1,168 @@ +package transactionLog + +import ( + "fmt" + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/process" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +var ( + providedHash = []byte("hash") + providedTx = &transaction.Transaction{Nonce: 123} + providedLogs = []*vmcommon.LogEntry{ + { + Identifier: []byte("identifier"), + Address: []byte("addr"), + Topics: [][]byte{[]byte("topic")}, + Data: [][]byte{[]byte("data")}, + }, + } +) + +func TestNewFailedTxLogsAccumulator(t *testing.T) { + t.Parallel() + + accumulator := NewFailedTxLogsAccumulator() + require.NotNil(t, accumulator) +} + +func TestFailedTxLogsAccumulator_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var accumulator *failedTxLogsAccumulator + require.True(t, accumulator.IsInterfaceNil()) + + accumulator = NewFailedTxLogsAccumulator() + require.False(t, accumulator.IsInterfaceNil()) +} + +func TestFailedTxLogsAccumulator_GetLogs(t *testing.T) { + t.Parallel() + + accumulator := NewFailedTxLogsAccumulator() + tx, logs, ok := accumulator.GetLogs([]byte("")) + require.False(t, ok) + require.Nil(t, tx) + require.Nil(t, logs) + + err := accumulator.SaveLogs(providedHash, providedTx, providedLogs) + require.NoError(t, err) + + tx, logs, ok = accumulator.GetLogs([]byte("missing hash")) + require.False(t, ok) + require.Nil(t, tx) + require.Nil(t, logs) + + tx, logs, ok = accumulator.GetLogs(providedHash) + require.True(t, ok) + require.Equal(t, providedTx, tx) + require.Equal(t, providedLogs, logs) +} + +func TestFailedTxLogsAccumulator_SaveLogs(t *testing.T) { + t.Parallel() + + t.Run("empty hash should error", func(t *testing.T) { + t.Parallel() + + accumulator := NewFailedTxLogsAccumulator() + err := accumulator.SaveLogs([]byte(""), nil, nil) + require.Equal(t, process.ErrNilTxHash, err) + }) + t.Run("nil tx should error", func(t *testing.T) { + t.Parallel() + + accumulator := NewFailedTxLogsAccumulator() + err := 
accumulator.SaveLogs(providedHash, nil, nil) + require.Equal(t, process.ErrNilTransaction, err) + }) + t.Run("empty logs should return nil", func(t *testing.T) { + t.Parallel() + + accumulator := NewFailedTxLogsAccumulator() + err := accumulator.SaveLogs(providedHash, providedTx, nil) + require.NoError(t, err) + }) + t.Run("should work and append logs", func(t *testing.T) { + t.Parallel() + + accumulator := NewFailedTxLogsAccumulator() + err := accumulator.SaveLogs(providedHash, providedTx, providedLogs) + require.NoError(t, err) + + providedNewLogs := []*vmcommon.LogEntry{ + { + Identifier: []byte("identifier 2"), + Address: []byte("addr"), + Topics: [][]byte{[]byte("topic 2")}, + Data: [][]byte{[]byte("data 2")}, + }, + } + err = accumulator.SaveLogs(providedHash, providedTx, providedNewLogs) + require.NoError(t, err) + + expectedLogs := append(providedLogs, providedNewLogs...) + receivedTx, receivedLogs, ok := accumulator.GetLogs(providedHash) + require.True(t, ok) + require.Equal(t, providedTx, receivedTx) + require.Equal(t, expectedLogs, receivedLogs) + }) +} + +func TestFailedTxLogsAccumulator_Remove(t *testing.T) { + t.Parallel() + + accumulator := NewFailedTxLogsAccumulator() + err := accumulator.SaveLogs(providedHash, providedTx, providedLogs) + require.NoError(t, err) + _, _, ok := accumulator.GetLogs(providedHash) + require.True(t, ok) + + accumulator.Remove([]byte("")) // coverage only + + accumulator.Remove(providedHash) + _, _, ok = accumulator.GetLogs(providedHash) + require.False(t, ok) +} + +func TestTxLogProcessor_ConcurrentOperations(t *testing.T) { + t.Parallel() + + require.NotPanics(t, func() { + accumulator := NewFailedTxLogsAccumulator() + + numCalls := 1000 + wg := sync.WaitGroup{} + wg.Add(numCalls) + + for i := 0; i < numCalls; i++ { + go func(idx int) { + switch idx % 3 { + case 0: + err := accumulator.SaveLogs(providedHash, providedTx, []*vmcommon.LogEntry{ + { + Identifier: []byte(fmt.Sprintf("identifier %d", idx)), + Address: []byte("addr"), + Topics: [][]byte{[]byte(fmt.Sprintf("topic %d", idx))}, + Data: [][]byte{[]byte(fmt.Sprintf("data %d", idx))}, + }, + }) + require.NoError(t, err) + case 1: + _, _, _ = accumulator.GetLogs(providedHash) + case 2: + accumulator.Remove(providedHash) + } + + wg.Done() + }(i) + } + + wg.Wait() + }) +} diff --git a/process/transactionLog/printTxLogProcessor_test.go b/process/transactionLog/printTxLogProcessor_test.go index 5074ec617a4..c442440afb9 100644 --- a/process/transactionLog/printTxLogProcessor_test.go +++ b/process/transactionLog/printTxLogProcessor_test.go @@ -65,9 +65,6 @@ func TestPrintTxLogProcessor_SaveLog(t *testing.T) { err := ptlp.SaveLog([]byte("hash"), &transaction.Transaction{}, txLogEntry) require.Nil(t, err) - err = ptlp.SaveLog([]byte("hash"), &transaction.Transaction{}, nil) - require.Nil(t, err) - require.True(t, strings.Contains(buff.String(), "printTxLogProcessor.SaveLog")) require.True(t, strings.Contains(buff.String(), "printTxLogProcessor.entry")) } diff --git a/process/transactionLog/process.go b/process/transactionLog/process.go index 76a44294cd2..39b74f4b02a 100644 --- a/process/transactionLog/process.go +++ b/process/transactionLog/process.go @@ -36,7 +36,7 @@ type txLogProcessor struct { } // NewTxLogProcessor creates a transaction log processor capable of parsing logs from the VM -// and saving them into the injected storage +// and saving them into the injected storage func NewTxLogProcessor(args ArgTxLogProcessor) (*txLogProcessor, error) { storer := args.Storer if check.IfNil(storer) && 
args.SaveInStorageEnabled { @@ -179,7 +179,6 @@ func (tlp *txLogProcessor) saveLogToCache(txHash []byte, log *transaction.Log) { }) tlp.logsIndices[string(txHash)] = len(tlp.logs) - 1 tlp.mut.Unlock() - } // For SC deployment transactions, we use the sender address diff --git a/process/transactionLog/process_test.go b/process/transactionLog/process_test.go index f132c865486..c4f58322056 100644 --- a/process/transactionLog/process_test.go +++ b/process/transactionLog/process_test.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/process/transactionLog" + "github.com/multiversx/mx-chain-go/testscommon" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" @@ -88,7 +89,7 @@ func TestTxLogProcessor_SaveLogsMarshalErr(t *testing.T) { retErr := errors.New("marshal err") txLogProcessor, _ := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{ Storer: &storageStubs.StorerStub{}, - Marshalizer: &mock.MarshalizerStub{ + Marshalizer: &testscommon.MarshallerStub{ MarshalCalled: func(obj interface{}) (bytes []byte, err error) { return nil, retErr }, @@ -111,7 +112,7 @@ func TestTxLogProcessor_SaveLogsStoreErr(t *testing.T) { return retErr }, }, - Marshalizer: &mock.MarshalizerStub{ + Marshalizer: &testscommon.MarshallerStub{ MarshalCalled: func(obj interface{}) (bytes []byte, err error) { return nil, nil }, @@ -138,7 +139,7 @@ func TestTxLogProcessor_SaveLogsCallsPutWithMarshalBuff(t *testing.T) { return nil }, }, - Marshalizer: &mock.MarshalizerStub{ + Marshalizer: &testscommon.MarshallerStub{ MarshalCalled: func(obj interface{}) (bytes []byte, err error) { log, _ := obj.(*transaction.Log) require.Equal(t, expectedLogData[0], log.Events[0].Data) @@ -164,7 +165,7 @@ func TestTxLogProcessor_GetLogErrNotFound(t *testing.T) { return nil, errors.New("storer error") }, }, - Marshalizer: &mock.MarshalizerStub{}, + Marshalizer: &testscommon.MarshallerStub{}, SaveInStorageEnabled: true, }) @@ -181,7 +182,7 @@ func TestTxLogProcessor_GetLogUnmarshalErr(t *testing.T) { return make([]byte, 0), nil }, }, - Marshalizer: &mock.MarshalizerStub{ + Marshalizer: &testscommon.MarshallerStub{ UnmarshalCalled: func(obj interface{}, buff []byte) error { return retErr }, @@ -240,3 +241,19 @@ func TestTxLogProcessor_GetLogFromCacheNotInCacheShouldReturnFromStorage(t *test _, found := txLogProcessor.GetLogFromCache([]byte("txhash")) require.True(t, found) } + +func TestTxLogProcessor_IsInterfaceNil(t *testing.T) { + t.Parallel() + + txLogProcessor, _ := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{ + Storer: &storageStubs.StorerStub{}, + Marshalizer: nil, + }) + require.True(t, txLogProcessor.IsInterfaceNil()) + + txLogProcessor, _ = transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{ + Storer: &storageStubs.StorerStub{}, + Marshalizer: &testscommon.MarshallerStub{}, + }) + require.False(t, txLogProcessor.IsInterfaceNil()) +} diff --git a/scripts/docker-testnet/README.md b/scripts/docker-testnet/README.md new file mode 100644 index 00000000000..04b4de89631 --- /dev/null +++ b/scripts/docker-testnet/README.md @@ -0,0 +1,14 @@ +# Setting up a local-testnet with Docker + +First and foremost, one needs to build the **seednode** & **node** images. 
Hence, the **_build.sh_** +script is provided. This can be done, by invoking the script or building the images manually. + +``` +./build.sh # (Optional) Can be ignored if you already have the images stored in the local registry. +./setup.sh # Will setup the local-testnet. +./clean.sh # Will stop and remove the containers related to the local-testnet. + +Optionally +./stop.sh # Will stop all the containers in the local-testnet. +./start.sh # Will start all stopped containers from the initial local-testnet. +``` \ No newline at end of file diff --git a/scripts/docker-testnet/build.sh b/scripts/docker-testnet/build.sh new file mode 100755 index 00000000000..0b038e17e4b --- /dev/null +++ b/scripts/docker-testnet/build.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -eux + +export DOCKERTESTNETDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +MULTIVERSXTESTNETSCRIPTSDIR="$(dirname "$DOCKERTESTNETDIR")/testnet" + +source "$DOCKERTESTNETDIR/variables.sh" + +cd ${MULTIVERSXDIR} + +docker build -f docker/seednode/Dockerfile . -t seednode:dev + +docker build -f docker/node/Dockerfile . -t node:dev + diff --git a/scripts/docker-testnet/clean.sh b/scripts/docker-testnet/clean.sh new file mode 100755 index 00000000000..b8cfe1ea2d7 --- /dev/null +++ b/scripts/docker-testnet/clean.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +set -eux + +# Delete the entire testnet folder, which includes configuration, executables and logs. + +export MULTIVERSXTESTNETSCRIPTSDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +source "$MULTIVERSXTESTNETSCRIPTSDIR/variables.sh" + +# Get the IDs of containers attached to the network +CONTAINER_IDS=$(docker network inspect -f '{{range .Containers}}{{.Name}} {{end}}' "$DOCKER_NETWORK_NAME") + +# Stop each container +echo "Removing containers..." +for CONTAINER_ID in $CONTAINER_IDS; do + docker stop "$CONTAINER_ID" + docker rm "$CONTAINER_ID" +done + +echo "Removing network..." +docker network rm ${DOCKER_NETWORK_NAME} + +echo "Removing $TESTNETDIR..." +rm -rf $TESTNETDIR diff --git a/scripts/docker-testnet/functions.sh b/scripts/docker-testnet/functions.sh new file mode 100755 index 00000000000..74d39bf71dc --- /dev/null +++ b/scripts/docker-testnet/functions.sh @@ -0,0 +1,231 @@ +#!/usr/bin/env bash + +# Starts from 3, if the DOCKER_NETWORK_SUBNET ends with a 0. The first IP address is reserved for the gateway and the +# second one is allocated to the seednode. Therefore the counting starts from 3. If you modify the DOCKER_NETWORK_SUBNET +# variable, make sure to change this one accordingly too. +IP_HOST_BYTE=3 + +cloneRepositories() { + cd $(dirname $MULTIVERSXDIR) + + git clone git@github.com:multiversx/mx-chain-deploy-go.git || true + git clone git@github.com:multiversx/mx-chain-proxy-go.git || true +} + +buildNodeImages() { + cd $MULTIVERSXDIR + + docker build -f docker/seednode/Dockerfile . -t seednode:dev + + docker build -f docker/node/Dockerfile . -t node:dev +} + +createDockerNetwork() { + docker network create -d bridge --subnet=${DOCKER_NETWORK_SUBNET} ${DOCKER_NETWORK_NAME} + + # this variable is used to keep track of the allocated IP addresses in the network, by removing the last byte + # of the DOCKER_NETWORK_SUBNET. One can consider this the host network address without the last byte at the end. + export NETWORK_ADDRESS=$(echo "$DOCKER_NETWORK_SUBNET" | rev | cut -d. 
-f2- | rev) +} + +startSeedNode() { + local publishPortArgs="" + if [[ "$DOCKER_PUBLISH_PORTS" -gt 0 ]]; then + publishPortArgs="-p $DOCKER_PUBLISH_PORT_RANGE:10000" + (( DOCKER_PUBLISH_PORT_RANGE++ )) + fi + + docker run -d --name seednode \ + -v ${TESTNETDIR}/seednode/config:/go/mx-chain-go/cmd/seednode/config \ + --network ${DOCKER_NETWORK_NAME} \ + $publishPortArgs \ + seednode:dev \ + --rest-api-interface=0.0.0.0:10000 +} + +startObservers() { + local observerIdx=0 + local publishPortArgs="" + + # Example for loop with injected variables in Bash + for ((i = 0; i < SHARDCOUNT; i++)); do + for ((j = 0; j < SHARD_OBSERVERCOUNT; j++)); do + # Your commands or code to be executed in each iteration + KEY_INDEX=$((TOTAL_NODECOUNT - observerIdx - 1)) + + if [[ "$DOCKER_PUBLISH_PORTS" -gt 0 ]]; then + publishPortArgs="-p $DOCKER_PUBLISH_PORT_RANGE:10200" + fi + + docker run -d --name "observer${observerIdx}-${NETWORK_ADDRESS}.${IP_HOST_BYTE}-10200-shard${i}" \ + -v $TESTNETDIR/node/config:/go/mx-chain-go/cmd/node/config \ + --network ${DOCKER_NETWORK_NAME} \ + $publishPortArgs \ + node:dev \ + --destination-shard-as-observer $i \ + --rest-api-interface=0.0.0.0:10200 \ + --config ./config/config_observer.toml \ + --sk-index=${KEY_INDEX} \ + $EXTRA_OBSERVERS_FLAGS + + + (( IP_HOST_BYTE++ )) + (( DOCKER_PUBLISH_PORT_RANGE++ )) + ((observerIdx++)) || true + done + done + + for ((i = 0; i < META_OBSERVERCOUNT; i++)); do + KEY_INDEX=$((TOTAL_NODECOUNT - observerIdx - 1)) + + if [[ "$DOCKER_PUBLISH_PORTS" -gt 0 ]]; then + publishPortArgs="-p $DOCKER_PUBLISH_PORT_RANGE:10200" + fi + + docker run -d --name "observer${observerIdx}-${NETWORK_ADDRESS}.${IP_HOST_BYTE}-10200-metachain" \ + -v $TESTNETDIR/node/config:/go/mx-chain-go/cmd/node/config \ + --network ${DOCKER_NETWORK_NAME} \ + $publishPortArgs \ + node:dev \ + --destination-shard-as-observer "metachain" \ + --rest-api-interface=0.0.0.0:10200 \ + --config ./config/config_observer.toml \ + --sk-index=${KEY_INDEX} \ + $EXTRA_OBSERVERS_FLAGS + + (( IP_HOST_BYTE++ )) + (( DOCKER_PUBLISH_PORT_RANGE++ )) + ((observerIdx++)) || true + done +} + +startValidators() { + local validatorIdx=0 + local publishPortArgs="" + # Example for loop with injected variables in Bash + for ((i = 0; i < SHARDCOUNT; i++)); do + for ((j = 0; j < SHARD_VALIDATORCOUNT; j++)); do + + if [[ "$DOCKER_PUBLISH_PORTS" -gt 0 ]]; then + publishPortArgs="-p $DOCKER_PUBLISH_PORT_RANGE:10200" + fi + + docker run -d --name "validator${validatorIdx}-${NETWORK_ADDRESS}.${IP_HOST_BYTE}-10200-shard${i}" \ + -v $TESTNETDIR/node/config:/go/mx-chain-go/cmd/node/config \ + --network ${DOCKER_NETWORK_NAME} \ + $publishPortArgs \ + node:dev \ + --rest-api-interface=0.0.0.0:10200 \ + --config ./config/config_validator.toml \ + --sk-index=${validatorIdx} \ + + (( IP_HOST_BYTE++ )) + (( DOCKER_PUBLISH_PORT_RANGE++ )) + ((validatorIdx++)) || true + done + done + + for ((i = 0; i < META_VALIDATORCOUNT; i++)); do + + if [[ "$DOCKER_PUBLISH_PORTS" -gt 0 ]]; then + publishPortArgs="-p $DOCKER_PUBLISH_PORT_RANGE:10200" + fi + + docker run -d --name "validator${validatorIdx}-${NETWORK_ADDRESS}.${IP_HOST_BYTE}-10200-metachain" \ + -v $TESTNETDIR/node/config:/go/mx-chain-go/cmd/node/config \ + --network ${DOCKER_NETWORK_NAME} \ + $publishPortArgs \ + node:dev \ + --rest-api-interface=0.0.0.0:10200 \ + --config ./config/config_observer.toml \ + --sk-index=${validatorIdx} \ + + (( IP_HOST_BYTE++ )) + (( DOCKER_PUBLISH_PORT_RANGE++ )) + ((validatorIdx++)) || true + done +} + +updateProxyConfigDocker() { + pushd 
$TESTNETDIR/proxy/config + cp config.toml config_edit.toml + + # Truncate config.toml before the [[Observers]] list + sed -i -n '/\[\[Observers\]\]/q;p' config_edit.toml + + if [ "$SHARD_OBSERVERCOUNT" -le 0 ]; then + generateProxyValidatorListDocker config_edit.toml + else + generateProxyObserverListDocker config_edit.toml + fi + + mv config_edit.toml config.toml + echo "Updated configuration for the Proxy." + popd +} + +generateProxyObserverListDocker() { + local ipByte=3 + + for ((i = 0; i < SHARDCOUNT; i++)); do + for ((j = 0; j < SHARD_OBSERVERCOUNT; j++)); do + + echo "[[Observers]]" >> config_edit.toml + echo " ShardId = $i" >> config_edit.toml + echo " Address = \"http://${NETWORK_ADDRESS}.${ipByte}:10200\"" >> config_edit.toml + echo ""$'\n' >> config_edit.toml + + (( ipByte++ )) || true + done + done + + for META_OBSERVER in $(seq $META_OBSERVERCOUNT); do + echo "[[Observers]]" >> config_edit.toml + echo " ShardId = $METASHARD_ID" >> config_edit.toml + echo " Address = \"http://${NETWORK_ADDRESS}.${ipByte}:10200\"" >> config_edit.toml + echo ""$'\n' >> config_edit.toml + + (( ipByte++ )) || true + done +} + +generateProxyValidatorListDocker() { + local ipByte=3 + + for ((i = 0; i < SHARDCOUNT; i++)); do + for ((j = 0; j < SHARD_VALIDATORCOUNT; j++)); do + + echo "[[Observers]]" >> config_edit.toml + echo " ShardId = $i" >> config_edit.toml + echo " Address = \"http://${NETWORK_ADDRESS}.${ipByte}:10200\"" >> config_edit.toml + echo " Type = \"Validator\"" >> config_edit.toml + echo ""$'\n' >> config_edit.toml + + (( ipByte++ )) || true + done + done + + for META_OBSERVER in $(seq $META_VALIDATORCOUNT); do + echo "[[Observers]]" >> config_edit.toml + echo " ShardId = $METASHARD_ID" >> config_edit.toml + echo " Address = \"http://${NETWORK_ADDRESS}.${ipByte}:10200\"" >> config_edit.toml + echo " Type = \"Validator\"" >> config_edit.toml + echo ""$'\n' >> config_edit.toml + + (( ipByte++ )) || true + done +} + +buildProxyImage() { + pushd ${PROXYDIR} + cd ../.. + docker build -f docker/Dockerfile . 
-t proxy:dev +} + +startProxyDocker() { + docker run -d --name "proxy" \ + -v $TESTNETDIR/proxy/config:/mx-chain-proxy-go/cmd/proxy/config \ + --network ${DOCKER_NETWORK_NAME} \ + -p $PORT_PROXY:8080 \ + proxy:dev +} diff --git a/scripts/docker-testnet/setup.sh b/scripts/docker-testnet/setup.sh new file mode 100755 index 00000000000..c96f071251d --- /dev/null +++ b/scripts/docker-testnet/setup.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +set -eux + +export DOCKERTESTNETDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +MULTIVERSXTESTNETSCRIPTSDIR="$(dirname "$DOCKERTESTNETDIR")/testnet" + +source "$DOCKERTESTNETDIR/variables.sh" +source "$DOCKERTESTNETDIR/functions.sh" +source "$MULTIVERSXTESTNETSCRIPTSDIR/include/config.sh" +source "$MULTIVERSXTESTNETSCRIPTSDIR/include/build.sh" + +cloneRepositories + +prepareFolders + +buildConfigGenerator + +generateConfig + +copyConfig + +copySeednodeConfig +updateSeednodeConfig + +copyNodeConfig +updateNodeConfig + +createDockerNetwork + +startSeedNode +startObservers +startValidators + +if [ $USE_PROXY -eq 1 ]; then + buildProxyImage + prepareFolders_Proxy + copyProxyConfig + updateProxyConfigDocker + startProxyDocker +fi + diff --git a/scripts/docker-testnet/start.sh b/scripts/docker-testnet/start.sh new file mode 100755 index 00000000000..f68e49d62d4 --- /dev/null +++ b/scripts/docker-testnet/start.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -eux + +export DOCKERTESTNETDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +file_path="${DOCKERTESTNETDIR}/tmp/stopped_containers" + +# Check if the file exists +if [ ! -f "$file_path" ]; then + echo "File $file_path not found." + exit 1 +fi + +# Read the file line by line +while IFS= read -r line; do + docker start $line +done < "$file_path" + +rm -rf "${DOCKERTESTNETDIR}/tmp" \ No newline at end of file diff --git a/scripts/docker-testnet/stop.sh b/scripts/docker-testnet/stop.sh new file mode 100755 index 00000000000..6c5054570dc --- /dev/null +++ b/scripts/docker-testnet/stop.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +set -eux + +# Delete the entire testnet folder, which includes configuration, executables and logs. + +export MULTIVERSXTESTNETSCRIPTSDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +source "$MULTIVERSXTESTNETSCRIPTSDIR/variables.sh" + +# Get the IDs of containers attached to the network +export CONTAINER_IDS=$(docker network inspect -f '{{range $k, $v := .Containers}}{{printf "%s\n" $k}}{{end}}' "$DOCKER_NETWORK_NAME") + +mkdir -p "$MULTIVERSXTESTNETSCRIPTSDIR/tmp" + +# Stop each container +echo "Stopping containers..." +for CONTAINER_ID in $CONTAINER_IDS; do + docker stop "$CONTAINER_ID" + echo "$CONTAINER_ID" >> "$MULTIVERSXTESTNETSCRIPTSDIR/tmp/stopped_containers" +done \ No newline at end of file diff --git a/scripts/docker-testnet/variables.sh b/scripts/docker-testnet/variables.sh new file mode 100644 index 00000000000..bdcb26662fd --- /dev/null +++ b/scripts/docker-testnet/variables.sh @@ -0,0 +1,186 @@ +# These paths must be absolute + +######################################################################## +# Docker network configuration + +# Don't change the subnet, unless you know what you are doing. Prone to errors. +export DOCKER_NETWORK_SUBNET="172.18.0.0/24" +export DOCKER_NETWORK_NAME="local-testnet" + +# By default ports won't be published. If set to 1, all containers will port-forward to host network. 
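+# When enabled, host ports are published sequentially starting from DOCKER_PUBLISH_PORT_RANGE (30000 below), one port per container, in start order.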
+export DOCKER_PUBLISH_PORTS=1 + +if [[ "$DOCKER_PUBLISH_PORTS" -gt 0 ]]; then + export DOCKER_PUBLISH_PORT_RANGE=30000 +fi + +######################################################################## + + +# METASHARD_ID will be used to identify a shard ID as metachain +export METASHARD_ID=4294967295 + +# Path to mx-chain-go. Determined automatically. Do not change. +export MULTIVERSXDIR=$(dirname $(dirname $MULTIVERSXTESTNETSCRIPTSDIR)) + +# Enable the MultiversX Proxy. Note that this is a private repository +# (mx-chain-proxy-go). +export USE_PROXY=1 + +# Enable the MultiversX Transaction Generator. Note that this is a private +# repository (mx-chain-txgen-go). +export USE_TXGEN=0 + +# Path where the testnet will be instantiated. This folder is assumed to not +# exist, but it doesn't matter if it already does. It will be created if not, +# anyway. +export TESTNETDIR="$HOME/MultiversX/testnet" + +# Path to mx-chain-deploy-go, branch: master. Default: near mx-chain-go. +export CONFIGGENERATORDIR="$(dirname $MULTIVERSXDIR)/mx-chain-deploy-go/cmd/filegen" + +export CONFIGGENERATOR="$CONFIGGENERATORDIR/filegen" # Leave unchanged. +export CONFIGGENERATOROUTPUTDIR="output" + +# Path to the executable node. Leave unchanged unless well justified. +export NODEDIR="$MULTIVERSXDIR/cmd/node" +export NODE="$NODEDIR/node" # Leave unchanged + +# Path to the executable seednode. Leave unchanged unless well justified. +export SEEDNODEDIR="$MULTIVERSXDIR/cmd/seednode" +export SEEDNODE="$SEEDNODEDIR/seednode" # Leave unchanged. + +# Niceness value of the Seednode, Observer Nodes and Validator Nodes. Leave +# blank to not adjust niceness. +export NODE_NICENESS=10 + +# Start a watcher daemon for each validator node, which restarts the node if it +# is suffled out of its shard. +export NODE_WATCHER=0 + +# Delays after running executables. +export SEEDNODE_DELAY=5 +export GENESIS_DELAY=30 +export HARDFORK_DELAY=900 #15 minutes enough to take export and gracefully close +export NODE_DELAY=60 + +export GENESIS_STAKE_TYPE="direct" #'delegated' or 'direct' as in direct stake + +#if set to 1, each observer will turn off the antiflooding capability, allowing spam in our network +export OBSERVERS_ANTIFLOOD_DISABLE=0 + +# Shard structure +export SHARDCOUNT=2 +export SHARD_VALIDATORCOUNT=3 +export SHARD_OBSERVERCOUNT=1 +export SHARD_CONSENSUS_SIZE=3 + +# Metashard structure +export META_VALIDATORCOUNT=3 +export META_OBSERVERCOUNT=1 +export META_CONSENSUS_SIZE=$META_VALIDATORCOUNT + +# MULTI_KEY_NODES if set to 1, one observer will be generated on each shard that will handle all generated keys +export MULTI_KEY_NODES=0 + +# EXTRA_KEYS if set to 1, extra keys will be added to the generated keys +export EXTRA_KEYS=1 + +# ALWAYS_NEW_CHAINID will generate a fresh new chain ID each time start.sh/config.sh is called +export ALWAYS_NEW_CHAINID=1 + +# ROUNDS_PER_EPOCH represents the number of rounds per epoch. If set to 0, it won't override the node's config +export ROUNDS_PER_EPOCH=0 + +# HYSTERESIS defines the hysteresis value for number of nodes in shard +export HYSTERESIS=0.0 + +# ALWAYS_NEW_APP_VERSION will set a new version each time the node will be compiled +export ALWAYS_NEW_APP_VERSION=0 + +# ALWAYS_UPDATE_CONFIGS will re-generate configs (toml + json) each time ./start.sh +# Set this variable to 0 when testing bootstrap from storage or other edge cases where you do not want a fresh new config +# each time. +export ALWAYS_UPDATE_CONFIGS=1 + +# IP of the seednode. 
This should be the first IP allocated in the local testnet network. If you modify the default +# DOCKER_NETWORK_SUBNET, you will need to edit this one accordingly too. +export SEEDNODE_IP="$(echo "$DOCKER_NETWORK_SUBNET" | rev | cut -d. -f2- | rev).2" + +# Ports used by the Nodes +export PORT_SEEDNODE="9999" +export PORT_ORIGIN_OBSERVER="21100" +export PORT_ORIGIN_OBSERVER_REST="10000" +export PORT_ORIGIN_VALIDATOR="21500" +export PORT_ORIGIN_VALIDATOR_REST="9500" + +######################################################################## +# Proxy configuration + +# Path to mx-chain-proxy-go, branch: master. Default: near mx-chain-go. +export PROXYDIR="$(dirname $MULTIVERSXDIR)/mx-chain-proxy-go/cmd/proxy" +export PROXY=$PROXYDIR/proxy # Leave unchanged. + +export PORT_PROXY="7950" +export PROXY_DELAY=10 + +######################################################################## +# TxGen configuration + +# Path to mx-chain-txgen-go. Default: near mx-chain-go. +export TXGENDIR="$(dirname $MULTIVERSXDIR)/mx-chain-txgen-go/cmd/txgen" +export TXGEN=$TXGENDIR/txgen # Leave unchanged. + +export PORT_TXGEN="7951" + +export TXGEN_SCENARIOS_LINE='Scenarios = ["basic", "erc20", "esdt"]' + +# Number of accounts to be generated by txgen +export NUMACCOUNTS="250" + +# Whether txgen should regenerate its accounts when starting, or not. +# Recommended value is 1, but 0 is useful to run the txgen a second time, to +# continue a testing session on the same accounts. +export TXGEN_REGENERATE_ACCOUNTS=0 + +# COPY_BACK_CONFIGS when set to 1 will copy back the configs and keys to the ./cmd/node/config directory +# in order to have a node in the IDE that can run a node in debug mode but in the same network with the rest of the nodes +# this option greatly helps the debugging process when running a small system test +export COPY_BACK_CONFIGS=0 +# SKIP_VALIDATOR_IDX when setting a value greater than -1 will not launch the validator with the provided index +export SKIP_VALIDATOR_IDX=-1 +# SKIP_OBSERVER_IDX when setting a value greater than -1 will not launch the observer with the provided index +export SKIP_OBSERVER_IDX=-1 + +# USE_HARDFORK will prepare the nodes to run the hardfork process, if needed +export USE_HARDFORK=1 + +# Load local overrides, .gitignored +LOCAL_OVERRIDES="$MULTIVERSXTESTNETSCRIPTSDIR/local.sh" +if [ -f "$LOCAL_OVERRIDES" ]; then + source "$MULTIVERSXTESTNETSCRIPTSDIR/local.sh" +fi + +# Leave unchanged. +let "total_observer_count = $SHARD_OBSERVERCOUNT * $SHARDCOUNT + $META_OBSERVERCOUNT" +export TOTAL_OBSERVERCOUNT=$total_observer_count + +# to enable the full archive feature on the observers, please use the --full-archive flag +export EXTRA_OBSERVERS_FLAGS="-operation-mode db-lookup-extension" + +if [[ $MULTI_KEY_NODES -eq 1 ]]; then + EXTRA_OBSERVERS_FLAGS="--no-key" +fi + +# Leave unchanged. 
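+# TOTAL_NODECOUNT covers every validator and observer in the testnet; observers pick their keys from the top of this range (sk-index = TOTAL_NODECOUNT - observer index - 1).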
+let "total_node_count = $SHARD_VALIDATORCOUNT * $SHARDCOUNT + $META_VALIDATORCOUNT + $TOTAL_OBSERVERCOUNT" +export TOTAL_NODECOUNT=$total_node_count + +# VALIDATOR_KEY_PEM_FILE is the pem file name when running single key mode, with all nodes' keys +export VALIDATOR_KEY_PEM_FILE="validatorKey.pem" + +# MULTI_KEY_PEM_FILE is the pem file name when running multi key mode, with all managed +export MULTI_KEY_PEM_FILE="allValidatorsKeys.pem" + +# EXTRA_KEY_PEM_FILE is the pem file name when running multi key mode, with all extra managed +export EXTRA_KEY_PEM_FILE="extraValidatorsKeys.pem" diff --git a/scripts/testnet/include/config.sh b/scripts/testnet/include/config.sh index 56d792dc7ed..25f836a84b7 100644 --- a/scripts/testnet/include/config.sh +++ b/scripts/testnet/include/config.sh @@ -3,6 +3,7 @@ generateConfig() { TMP_SHARD_OBSERVERCOUNT=$SHARD_OBSERVERCOUNT TMP_META_OBSERVERCOUNT=$META_OBSERVERCOUNT + # set num of observers to 0, they will start with generated keys if [[ $MULTI_KEY_NODES -eq 1 ]]; then TMP_SHARD_OBSERVERCOUNT=0 TMP_META_OBSERVERCOUNT=0 @@ -19,7 +20,8 @@ generateConfig() { -num-of-observers-in-metachain $TMP_META_OBSERVERCOUNT \ -metachain-consensus-group-size $META_CONSENSUS_SIZE \ -stake-type $GENESIS_STAKE_TYPE \ - -hysteresis $HYSTERESIS + -hysteresis $HYSTERESIS \ + -round-duration $ROUND_DURATION_IN_MS popd } @@ -131,10 +133,53 @@ updateNodeConfig() { sed -i '/\[Antiflood\]/,/\[Logger\]/ s/true/false/' config_observer.toml fi + updateConfigsForStakingV4 + echo "Updated configuration for Nodes." popd } +updateConfigsForStakingV4() { + config=$(cat enableEpochs.toml) + + echo "Updating staking v4 configs" + + # Get the StakingV4Step3EnableEpoch value + staking_enable_epoch=$(echo "$config" | awk -F '=' '/ StakingV4Step3EnableEpoch/{gsub(/^[ \t]+|[ \t]+$/,"", $2); print $2; exit}') + # Count the number of entries in MaxNodesChangeEnableEpoch + entry_count=$(echo "$config" | awk '/MaxNodesChangeEnableEpoch/,/\]/{if ($0 ~ /\{/) {count++}} END {print count}') + + # Check if entry_count is less than 2 + if [ "$entry_count" -lt 2 ]; then + echo "Not enough entries found to update" + else + # Find all entries in MaxNodesChangeEnableEpoch + all_entries=$(awk '/MaxNodesChangeEnableEpoch/,/\]/{if ($0 ~ /^[[:space:]]*\{/) {p=1}; if (p) print; if ($0 ~ /\]/) p=0}' enableEpochs.toml | grep -vE '^\s*#' | sed '/^\s*$/d') + + # Get the index of the entry with EpochEnable equal to StakingV4Step3EnableEpoch + index=$(echo "$all_entries" | grep -n "EpochEnable = $staking_enable_epoch" | cut -d: -f1) + + if [[ -z "${index// }" ]]; then + echo -e "\033[1;33mWarning: MaxNodesChangeEnableEpoch does not contain an entry enable epoch for StakingV4Step3EnableEpoch, nodes might fail to start...\033[0m" + else + prev_entry=$(echo "$all_entries" | sed -n "$((index-1))p") + curr_entry=$(echo "$all_entries" | sed -n "$((index))p") + + # Extract the value of MaxNumNodes & NodesToShufflePerShard from prev_entry + max_nodes_from_prev_epoch=$(echo "$prev_entry" | awk -F 'MaxNumNodes = ' '{print $2}' | cut -d ',' -f1) + nodes_to_shuffle_per_shard=$(echo "$prev_entry" | awk -F 'NodesToShufflePerShard = ' '{gsub(/[^0-9]+/, "", $2); print $2}') + + # Calculate the new MaxNumNodes value based on the formula + new_max_nodes=$((max_nodes_from_prev_epoch - (SHARDCOUNT + 1) * nodes_to_shuffle_per_shard)) + curr_entry_updated=$(echo "$curr_entry" | awk -v new_max_nodes="$new_max_nodes" '{gsub(/MaxNumNodes = [0-9]+,/, "MaxNumNodes = " new_max_nodes ",")}1') + + echo "Updating entry in MaxNodesChangeEnableEpoch 
from $curr_entry to $curr_entry_updated" + + sed -i "/$staking_enable_epoch/,/$staking_enable_epoch/ s|.*$curr_entry.*|$curr_entry_updated|" enableEpochs.toml + fi + fi +} + copyProxyConfig() { pushd $TESTNETDIR diff --git a/scripts/testnet/include/observers.sh b/scripts/testnet/include/observers.sh index 6ba9ff9293a..50e7f5ade03 100644 --- a/scripts/testnet/include/observers.sh +++ b/scripts/testnet/include/observers.sh @@ -82,10 +82,18 @@ assembleCommand_startObserverNode() { let "KEY_INDEX=$TOTAL_NODECOUNT - $OBSERVER_INDEX - 1" WORKING_DIR=$TESTNETDIR/node_working_dirs/observer$OBSERVER_INDEX + KEYS_FLAGS="-validator-key-pem-file ./config/validatorKey.pem -sk-index $KEY_INDEX" + # if node is running in multi key mode, in order to avoid loading the common allValidatorKeys.pem file + # and force generating a new key for observers, simply provide an invalid path + if [[ $MULTI_KEY_NODES -eq 1 ]]; then + TMP_MISSING_PEM="missing-file.pem" + KEYS_FLAGS="-all-validator-keys-pem-file $TMP_MISSING_PEM -validator-key-pem-file $TMP_MISSING_PEM" + fi + local nodeCommand="./node \ -port $PORT --profile-mode -log-save -log-level $LOGLEVEL --log-logger-name --log-correlation --use-health-service -rest-api-interface localhost:$RESTAPIPORT \ -destination-shard-as-observer $SHARD \ - -sk-index $KEY_INDEX \ + $KEYS_FLAGS \ -working-directory $WORKING_DIR -config ./config/config_observer.toml $EXTRA_OBSERVERS_FLAGS" if [ -n "$NODE_NICENESS" ] diff --git a/scripts/testnet/variables.sh b/scripts/testnet/variables.sh index 1dc3c7cc65c..c5a5b013523 100644 --- a/scripts/testnet/variables.sh +++ b/scripts/testnet/variables.sh @@ -62,6 +62,8 @@ export META_VALIDATORCOUNT=3 export META_OBSERVERCOUNT=1 export META_CONSENSUS_SIZE=$META_VALIDATORCOUNT +export ROUND_DURATION_IN_MS=6000 + # MULTI_KEY_NODES if set to 1, one observer will be generated on each shard that will handle all generated keys export MULTI_KEY_NODES=0 @@ -170,10 +172,6 @@ export TOTAL_OBSERVERCOUNT=$total_observer_count # to enable the full archive feature on the observers, please use the --full-archive flag export EXTRA_OBSERVERS_FLAGS="-operation-mode db-lookup-extension" -if [[ $MULTI_KEY_NODES -eq 1 ]]; then - EXTRA_OBSERVERS_FLAGS="--no-key" -fi - # Leave unchanged. 
let "total_node_count = $SHARD_VALIDATORCOUNT * $SHARDCOUNT + $META_VALIDATORCOUNT + $TOTAL_OBSERVERCOUNT" export TOTAL_NODECOUNT=$total_node_count diff --git a/sharding/mock/enableEpochsHandlerMock.go b/sharding/mock/enableEpochsHandlerMock.go index e275c4ea165..9a842f9adae 100644 --- a/sharding/mock/enableEpochsHandlerMock.go +++ b/sharding/mock/enableEpochsHandlerMock.go @@ -7,7 +7,6 @@ import ( // EnableEpochsHandlerMock - type EnableEpochsHandlerMock struct { - WaitingListFixEnableEpochField uint32 RefactorPeersMiniBlocksEnableEpochField uint32 IsRefactorPeersMiniBlocksFlagEnabledField bool CurrentEpoch uint32 @@ -18,8 +17,6 @@ func (mock *EnableEpochsHandlerMock) GetActivationEpoch(flag core.EnableEpochFla switch flag { case common.RefactorPeersMiniBlocksFlag: return mock.RefactorPeersMiniBlocksEnableEpochField - case common.WaitingListFixFlag: - return mock.WaitingListFixEnableEpochField default: return 0 @@ -51,6 +48,16 @@ func (mock *EnableEpochsHandlerMock) FixGasRemainingForSaveKeyValueBuiltinFuncti return false } +// IsRelayedTransactionsV3FlagEnabled - +func (mock *EnableEpochsHandlerMock) IsRelayedTransactionsV3FlagEnabled() bool { + return false +} + +// IsFixRelayedBaseCostFlagEnabled - +func (mock *EnableEpochsHandlerMock) IsFixRelayedBaseCostFlagEnabled() bool { + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (mock *EnableEpochsHandlerMock) IsInterfaceNil() bool { return mock == nil diff --git a/sharding/nodesCoordinator/common.go b/sharding/nodesCoordinator/common.go index c771e711740..1e376cd6b65 100644 --- a/sharding/nodesCoordinator/common.go +++ b/sharding/nodesCoordinator/common.go @@ -52,6 +52,7 @@ func displayNodesConfiguration( waiting map[uint32][]Validator, leaving map[uint32][]Validator, actualRemaining map[uint32][]Validator, + shuffledOut map[uint32][]Validator, nbShards uint32, ) { for shard := uint32(0); shard <= nbShards; shard++ { @@ -75,6 +76,10 @@ func displayNodesConfiguration( pk := v.PubKey() log.Debug("actually remaining", "pk", pk, "shardID", shardID) } + for _, v := range shuffledOut[shardID] { + pk := v.PubKey() + log.Debug("shuffled out", "pk", pk, "shardID", shardID) + } } } diff --git a/sharding/nodesCoordinator/consensusGroupProviderBench_test.go b/sharding/nodesCoordinator/consensusGroupProviderBench_test.go index c24f6f9549f..49731812213 100644 --- a/sharding/nodesCoordinator/consensusGroupProviderBench_test.go +++ b/sharding/nodesCoordinator/consensusGroupProviderBench_test.go @@ -1,11 +1,9 @@ package nodesCoordinator import ( - "math/rand" "testing" ) -const randSeed = 75 const numValidators = 63 const numValidatorsInEligibleList = 400 @@ -20,7 +18,6 @@ func getRandomness() []byte { func BenchmarkReslicingBasedProvider_Get(b *testing.B) { numVals := numValidators - rand.Seed(randSeed) expElList := getExpandedEligibleList(numValidatorsInEligibleList) randomness := getRandomness() @@ -32,7 +29,6 @@ func BenchmarkReslicingBasedProvider_Get(b *testing.B) { func BenchmarkSelectionBasedProvider_Get(b *testing.B) { numVals := numValidators - rand.Seed(randSeed) expElList := getExpandedEligibleList(numValidatorsInEligibleList) randomness := getRandomness() diff --git a/sharding/nodesCoordinator/dtos.go b/sharding/nodesCoordinator/dtos.go index 854dd931d8d..75c28194a6a 100644 --- a/sharding/nodesCoordinator/dtos.go +++ b/sharding/nodesCoordinator/dtos.go @@ -7,6 +7,7 @@ type ArgsUpdateNodes struct { NewNodes []Validator UnStakeLeaving []Validator AdditionalLeaving []Validator + Auction []Validator 
Rand []byte NbShards uint32 Epoch uint32 @@ -16,6 +17,8 @@ type ArgsUpdateNodes struct { type ResUpdateNodes struct { Eligible map[uint32][]Validator Waiting map[uint32][]Validator + ShuffledOut map[uint32][]Validator Leaving []Validator StillRemaining []Validator + LowWaitingList bool } diff --git a/sharding/nodesCoordinator/errors.go b/sharding/nodesCoordinator/errors.go index def3944cc0d..3d063f4605e 100644 --- a/sharding/nodesCoordinator/errors.go +++ b/sharding/nodesCoordinator/errors.go @@ -114,3 +114,12 @@ var ErrNilGenesisNodesSetupHandler = errors.New("nil genesis nodes setup handler // ErrKeyNotFoundInWaitingList signals that the provided key has not been found in waiting list var ErrKeyNotFoundInWaitingList = errors.New("key not found in waiting list") + +// ErrNilNodesCoordinatorRegistryFactory signals that a nil nodes coordinator registry factory has been given +var ErrNilNodesCoordinatorRegistryFactory = errors.New("nil nodes coordinator registry factory has been given") + +// ErrReceivedAuctionValidatorsBeforeStakingV4 signals that auction nodes have been received from peer mini blocks before enabling staking v4 +var ErrReceivedAuctionValidatorsBeforeStakingV4 = errors.New("should not have received selected nodes from auction in peer mini blocks, since staking v4 is not enabled yet") + +// ErrNilEpochNotifier signals that a nil EpochNotifier has been provided +var ErrNilEpochNotifier = errors.New("nil epoch notifier provided") diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index a4a7e178ee1..71d2b5351b3 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -7,10 +7,13 @@ import ( "sync" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/hashing/sha256" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" ) var _ NodesShuffler = (*randHashShuffler)(nil) @@ -24,22 +27,36 @@ type NodesShufflerArgs struct { ShuffleBetweenShards bool MaxNodesEnableConfig []config.MaxNodesChangeConfig EnableEpochsHandler common.EnableEpochsHandler + EnableEpochs config.EnableEpochs } type shuffleNodesArg struct { - eligible map[uint32][]Validator - waiting map[uint32][]Validator - unstakeLeaving []Validator - additionalLeaving []Validator - newNodes []Validator - randomness []byte - distributor ValidatorsDistributor - nodesMeta uint32 - nodesPerShard uint32 - nbShards uint32 - maxNodesToSwapPerShard uint32 - flagBalanceWaitingLists bool - flagWaitingListFix bool + eligible map[uint32][]Validator + waiting map[uint32][]Validator + unstakeLeaving []Validator + additionalLeaving []Validator + newNodes []Validator + auction []Validator + randomness []byte + distributor ValidatorsDistributor + nodesMeta uint32 + nodesPerShard uint32 + nbShards uint32 + maxNodesToSwapPerShard uint32 + maxNumNodes uint32 + flagBalanceWaitingLists bool + flagStakingV4Step2 bool + flagStakingV4Step3 bool + flagCleanupAuctionOnLowWaitingList bool +} + +type shuffledNodesConfig struct { + numShuffled uint32 + numNewEligible uint32 + numNewWaiting uint32 + numSelectedAuction uint32 + maxNumNodes uint32 + flagStakingV4Step2 bool } // TODO: Decide if 
transaction load statistics will be used for limiting the number of shards @@ -48,16 +65,20 @@ type randHashShuffler struct { // when reinitialization of node in new shard is implemented shuffleBetweenShards bool - adaptivity bool - nodesShard uint32 - nodesMeta uint32 - shardHysteresis uint32 - metaHysteresis uint32 - activeNodesConfig config.MaxNodesChangeConfig - availableNodesConfigs []config.MaxNodesChangeConfig - mutShufflerParams sync.RWMutex - validatorDistributor ValidatorsDistributor - enableEpochsHandler common.EnableEpochsHandler + adaptivity bool + nodesShard uint32 + nodesMeta uint32 + shardHysteresis uint32 + metaHysteresis uint32 + activeNodesConfig config.MaxNodesChangeConfig + availableNodesConfigs []config.MaxNodesChangeConfig + mutShufflerParams sync.RWMutex + validatorDistributor ValidatorsDistributor + enableEpochsHandler common.EnableEpochsHandler + stakingV4Step2EnableEpoch uint32 + flagStakingV4Step2 atomic.Flag + stakingV4Step3EnableEpoch uint32 + flagStakingV4Step3 atomic.Flag } // NewHashValidatorsShuffler creates a validator shuffler that uses a hash between validator key and a given @@ -71,7 +92,7 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro } err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.BalanceWaitingListsFlag, - common.WaitingListFixFlag, + common.CleanupAuctionOnLowWaitingListFlag, }) if err != nil { return nil, err @@ -80,6 +101,9 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro var configs []config.MaxNodesChangeConfig log.Debug("hashValidatorShuffler: enable epoch for max nodes change", "epoch", args.MaxNodesEnableConfig) + log.Debug("hashValidatorShuffler: enable epoch for staking v4 step 2", "epoch", args.EnableEpochs.StakingV4Step2EnableEpoch) + log.Debug("hashValidatorShuffler: enable epoch for staking v4 step 3", "epoch", args.EnableEpochs.StakingV4Step3EnableEpoch) + if args.MaxNodesEnableConfig != nil { configs = make([]config.MaxNodesChangeConfig, len(args.MaxNodesEnableConfig)) copy(configs, args.MaxNodesEnableConfig) @@ -87,9 +111,11 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro log.Debug("Shuffler created", "shuffleBetweenShards", args.ShuffleBetweenShards) rxs := &randHashShuffler{ - shuffleBetweenShards: args.ShuffleBetweenShards, - availableNodesConfigs: configs, - enableEpochsHandler: args.EnableEpochsHandler, + shuffleBetweenShards: args.ShuffleBetweenShards, + availableNodesConfigs: configs, + enableEpochsHandler: args.EnableEpochsHandler, + stakingV4Step2EnableEpoch: args.EnableEpochs.StakingV4Step2EnableEpoch, + stakingV4Step3EnableEpoch: args.EnableEpochs.StakingV4Step3EnableEpoch, } rxs.UpdateParams(args.NodesShard, args.NodesMeta, args.Hysteresis, args.Adaptivity) @@ -173,19 +199,23 @@ func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNo } return shuffleNodes(shuffleNodesArg{ - eligible: eligibleAfterReshard, - waiting: waitingAfterReshard, - unstakeLeaving: args.UnStakeLeaving, - additionalLeaving: args.AdditionalLeaving, - newNodes: args.NewNodes, - randomness: args.Rand, - nodesMeta: nodesMeta, - nodesPerShard: nodesPerShard, - nbShards: args.NbShards, - distributor: rhs.validatorDistributor, - maxNodesToSwapPerShard: rhs.activeNodesConfig.NodesToShufflePerShard, - flagBalanceWaitingLists: rhs.enableEpochsHandler.IsFlagEnabledInEpoch(common.BalanceWaitingListsFlag, args.Epoch), - flagWaitingListFix: 
rhs.enableEpochsHandler.IsFlagEnabledInEpoch(common.WaitingListFixFlag, args.Epoch), + eligible: eligibleAfterReshard, + waiting: waitingAfterReshard, + unstakeLeaving: args.UnStakeLeaving, + additionalLeaving: args.AdditionalLeaving, + newNodes: args.NewNodes, + auction: args.Auction, + randomness: args.Rand, + nodesMeta: nodesMeta, + nodesPerShard: nodesPerShard, + nbShards: args.NbShards, + distributor: rhs.validatorDistributor, + maxNodesToSwapPerShard: rhs.activeNodesConfig.NodesToShufflePerShard, + flagBalanceWaitingLists: rhs.enableEpochsHandler.IsFlagEnabledInEpoch(common.BalanceWaitingListsFlag, args.Epoch), + flagStakingV4Step2: rhs.flagStakingV4Step2.IsSet(), + flagStakingV4Step3: rhs.flagStakingV4Step3.IsSet(), + maxNumNodes: rhs.activeNodesConfig.MaxNumNodes, + flagCleanupAuctionOnLowWaitingList: rhs.enableEpochsHandler.IsFlagEnabledInEpoch(common.CleanupAuctionOnLowWaitingListFlag, args.Epoch), }) } @@ -263,18 +293,12 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { eligibleCopy, waitingCopy, numToRemove, - remainingUnstakeLeaving, - int(arg.nodesMeta), - int(arg.nodesPerShard), - arg.flagWaitingListFix) + remainingUnstakeLeaving) newEligible, newWaiting, stillRemainingAdditionalLeaving := removeLeavingNodesFromValidatorMaps( newEligible, newWaiting, numToRemove, - remainingAdditionalLeaving, - int(arg.nodesMeta), - int(arg.nodesPerShard), - arg.flagWaitingListFix) + remainingAdditionalLeaving) stillRemainingInLeaving := append(stillRemainingUnstakeLeaving, stillRemainingAdditionalLeaving...) @@ -282,26 +306,60 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { err = moveMaxNumNodesToMap(newEligible, newWaiting, arg.nodesMeta, arg.nodesPerShard) if err != nil { - log.Warn("moveNodesToMap failed", "error", err) + return nil, fmt.Errorf("moveNodesToMap failed, error: %w", err) } - err = distributeValidators(newWaiting, arg.newNodes, arg.randomness, false) + err = checkAndDistributeNewNodes(newWaiting, arg.newNodes, arg.randomness, arg.flagStakingV4Step3) if err != nil { - log.Warn("distributeValidators newNodes failed", "error", err) + return nil, fmt.Errorf("distributeValidators newNodes failed, error: %w", err) } - err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) - if err != nil { - log.Warn("distributeValidators shuffledOut failed", "error", err) + shuffledNodesCfg := &shuffledNodesConfig{ + numShuffled: getNumPubKeys(shuffledOutMap), + numNewEligible: getNumPubKeys(newEligible), + numNewWaiting: getNumPubKeys(newWaiting), + numSelectedAuction: uint32(len(arg.auction)), + maxNumNodes: arg.maxNumNodes, + flagStakingV4Step2: arg.flagStakingV4Step2, + } + + lowWaitingList := shouldDistributeShuffledToWaitingInStakingV4(shuffledNodesCfg) + if arg.flagStakingV4Step3 || lowWaitingList { + log.Debug("distributing selected nodes from auction to waiting", + "num auction nodes", len(arg.auction), "num waiting nodes", shuffledNodesCfg.numNewWaiting) + + // Distribute selected validators from AUCTION -> WAITING + err = distributeValidators(newWaiting, arg.auction, arg.randomness, arg.flagBalanceWaitingLists) + if err != nil { + return nil, fmt.Errorf("distributeValidators auction list failed, error: %w", err) + } + } + + if !arg.flagStakingV4Step2 || lowWaitingList { + log.Debug("distributing shuffled out nodes to waiting", + "num shuffled nodes", shuffledNodesCfg.numShuffled, "num waiting nodes", shuffledNodesCfg.numNewWaiting) + + // Distribute validators from SHUFFLED OUT -> 
WAITING + err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) + if err != nil { + return nil, fmt.Errorf("distributeValidators shuffled out failed, error: %w", err) + } } actualLeaving, _ := removeValidatorsFromList(allLeaving, stillRemainingInLeaving, len(stillRemainingInLeaving)) + shouldCleanupAuction := false + if arg.flagCleanupAuctionOnLowWaitingList { + shouldCleanupAuction = lowWaitingList + } + return &ResUpdateNodes{ Eligible: newEligible, Waiting: newWaiting, + ShuffledOut: shuffledOutMap, Leaving: actualLeaving, StillRemaining: stillRemainingInLeaving, + LowWaitingList: shouldCleanupAuction, }, nil } @@ -381,62 +439,16 @@ func removeLeavingNodesFromValidatorMaps( waiting map[uint32][]Validator, numToRemove map[uint32]int, leaving []Validator, - minNodesMeta int, - minNodesPerShard int, - waitingFixEnabled bool, ) (map[uint32][]Validator, map[uint32][]Validator, []Validator) { stillRemainingInLeaving := make([]Validator, len(leaving)) copy(stillRemainingInLeaving, leaving) - if !waitingFixEnabled { - newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, numToRemove) - newEligible, stillRemainingInLeaving := removeNodesFromMap(eligible, stillRemainingInLeaving, numToRemove) - return newEligible, newWaiting, stillRemainingInLeaving - } - - return removeLeavingNodes(eligible, waiting, numToRemove, stillRemainingInLeaving, minNodesMeta, minNodesPerShard) -} - -func removeLeavingNodes( - eligible map[uint32][]Validator, - waiting map[uint32][]Validator, - numToRemove map[uint32]int, - stillRemainingInLeaving []Validator, - minNodesMeta int, - minNodesPerShard int, -) (map[uint32][]Validator, map[uint32][]Validator, []Validator) { - maxNumToRemoveFromWaiting := make(map[uint32]int) - for shardId := range eligible { - computedMinNumberOfNodes := computeMinNumberOfNodes(eligible, waiting, shardId, minNodesMeta, minNodesPerShard) - maxNumToRemoveFromWaiting[shardId] = computedMinNumberOfNodes - } - - newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, maxNumToRemoveFromWaiting) - - for shardId, toRemove := range numToRemove { - computedMinNumberOfNodes := computeMinNumberOfNodes(eligible, waiting, shardId, minNodesMeta, minNodesPerShard) - if toRemove > computedMinNumberOfNodes { - numToRemove[shardId] = computedMinNumberOfNodes - } - } - + newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, numToRemove) newEligible, stillRemainingInLeaving := removeNodesFromMap(eligible, stillRemainingInLeaving, numToRemove) return newEligible, newWaiting, stillRemainingInLeaving } -func computeMinNumberOfNodes(eligible map[uint32][]Validator, waiting map[uint32][]Validator, shardId uint32, minNodesMeta int, minNodesPerShard int) int { - minimumNumberOfNodes := minNodesPerShard - if shardId == core.MetachainShardId { - minimumNumberOfNodes = minNodesMeta - } - computedMinNumberOfNodes := len(eligible[shardId]) + len(waiting[shardId]) - minimumNumberOfNodes - if computedMinNumberOfNodes < 0 { - computedMinNumberOfNodes = 0 - } - return computedMinNumberOfNodes -} - // computeNewShards determines the new number of shards based on the number of nodes in the network func (rhs *randHashShuffler) computeNewShards( eligible map[uint32][]Validator, @@ -586,6 +598,51 @@ func removeValidatorFromList(validatorList []Validator, index int) []Validator { return validatorList[:len(validatorList)-1] } +func checkAndDistributeNewNodes( + 
waiting map[uint32][]Validator, + newNodes []Validator, + randomness []byte, + flagStakingV4Step3 bool, +) error { + if !flagStakingV4Step3 { + return distributeValidators(waiting, newNodes, randomness, false) + } + + if len(newNodes) > 0 { + return epochStart.ErrReceivedNewListNodeInStakingV4 + } + + return nil +} + +func shouldDistributeShuffledToWaitingInStakingV4(shuffledNodesCfg *shuffledNodesConfig) bool { + if !shuffledNodesCfg.flagStakingV4Step2 { + return false + } + + totalNewWaiting := shuffledNodesCfg.numNewWaiting + shuffledNodesCfg.numSelectedAuction + totalNodes := totalNewWaiting + shuffledNodesCfg.numNewEligible + shuffledNodesCfg.numShuffled + + log.Debug("checking if should distribute shuffled out nodes to waiting in staking v4", + "numShuffled", shuffledNodesCfg.numShuffled, + "numNewEligible", shuffledNodesCfg.numNewEligible, + "numSelectedAuction", shuffledNodesCfg.numSelectedAuction, + "totalNewWaiting", totalNewWaiting, + "totalNodes", totalNodes, + "maxNumNodes", shuffledNodesCfg.maxNumNodes, + ) + + distributeShuffledToWaitingInStakingV4 := false + if totalNodes <= shuffledNodesCfg.maxNumNodes { + log.Debug("num of total nodes in waiting is too low after shuffling; will distribute " + + "shuffled out nodes directly to waiting and skip sending them to auction") + + distributeShuffledToWaitingInStakingV4 = true + } + + return distributeShuffledToWaitingInStakingV4 +} + func removeValidatorFromListKeepOrder(validatorList []Validator, index int) []Validator { indexNotOK := index > len(validatorList)-1 || index < 0 if indexNotOK { @@ -646,6 +703,16 @@ func moveNodesToMap(destination map[uint32][]Validator, source map[uint32][]Vali return nil } +func getNumPubKeys(shardValidatorsMap map[uint32][]Validator) uint32 { + numPubKeys := uint32(0) + + for _, validatorsInShard := range shardValidatorsMap { + numPubKeys += uint32(len(validatorsInShard)) + } + + return numPubKeys +} + // moveMaxNumNodesToMap moves the validators in the source list to the corresponding destination list // but adding just enough nodes so that at most the number of nodes is kept in the destination list // The parameter maxNodesToMove is a limiting factor and should limit the number of nodes @@ -778,6 +845,12 @@ func (rhs *randHashShuffler) updateShufflerConfig(epoch uint32) { "epochEnable", rhs.activeNodesConfig.EpochEnable, "maxNodesToShufflePerShard", rhs.activeNodesConfig.NodesToShufflePerShard, ) + + rhs.flagStakingV4Step3.SetValue(epoch >= rhs.stakingV4Step3EnableEpoch) + log.Debug("staking v4 step3", "enabled", rhs.flagStakingV4Step3.IsSet()) + + rhs.flagStakingV4Step2.SetValue(epoch >= rhs.stakingV4Step2EnableEpoch) + log.Debug("staking v4 step2", "enabled", rhs.flagStakingV4Step2.IsSet()) } func (rhs *randHashShuffler) sortConfigs() { diff --git a/sharding/nodesCoordinator/hashValidatorShuffler_test.go b/sharding/nodesCoordinator/hashValidatorShuffler_test.go index 79a8ed1e7f8..788ec3f9b59 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler_test.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler_test.go @@ -13,10 +13,9 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/sharding/mock" - "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" 
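Note on shouldDistributeShuffledToWaitingInStakingV4 above: with staking v4 step 2 active, shuffled-out nodes normally skip the waiting list and are expected to re-enter through the auction; when the post-shuffle total would stay at or under MaxNumNodes (a low waiting list), they are sent straight back to waiting instead and the auction step is skipped. Below is a self-contained sketch of the same arithmetic with made-up counts (the numbers are illustrative only, not taken from any real configuration).

package main

import "fmt"

// shuffledCfg mirrors the fields used by the decision in the PR (simplified).
type shuffledCfg struct {
	numShuffled        uint32
	numNewEligible     uint32
	numNewWaiting      uint32
	numSelectedAuction uint32
	maxNumNodes        uint32
	flagStakingV4Step2 bool
}

// shouldDistributeShuffledToWaiting reproduces the check: it only matters once
// staking v4 step 2 is active, and it is true when the total node count after
// the shuffle does not exceed the configured maximum.
func shouldDistributeShuffledToWaiting(cfg shuffledCfg) bool {
	if !cfg.flagStakingV4Step2 {
		return false
	}
	totalNewWaiting := cfg.numNewWaiting + cfg.numSelectedAuction
	totalNodes := totalNewWaiting + cfg.numNewEligible + cfg.numShuffled
	return totalNodes <= cfg.maxNumNodes
}

func main() {
	// Example: 80 shuffled out, 300 eligible, 20 waiting, 10 from auction, cap 500.
	cfg := shuffledCfg{80, 300, 20, 10, 500, true}
	// 20+10+300+80 = 410 <= 500, so shuffled-out nodes bypass the auction.
	fmt.Println("low waiting list, bypass auction:", shouldDistributeShuffledToWaiting(cfg))
}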
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -194,8 +193,11 @@ func createHashShufflerInter() (*randHashShuffler, error) { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: true, - MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -210,8 +212,11 @@ func createHashShufflerIntraShards() (*randHashShuffler, error) { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -989,10 +994,7 @@ func Test_shuffleOutNodesWithLeaving(t *testing.T) { copyEligibleMap, copyWaitingMap, numToRemove, - leaving, - eligibleNodesPerShard, - eligibleNodesPerShard, - true) + leaving) shuffledOut, newEligible := shuffleOutNodes(newEligible, numToRemove, randomness) shuffleOutList := make([]Validator, 0) for _, shuffledOutPerShard := range shuffledOut { @@ -1027,10 +1029,7 @@ func Test_shuffleOutNodesWithLeavingMoreThanWaiting(t *testing.T) { copyEligibleMap, copyWaitingMap, numToRemove, - leaving, - eligibleNodesPerShard, - eligibleNodesPerShard, - true) + leaving) shuffledOut, newEligible := shuffleOutNodes(newEligible, numToRemove, randomness) shuffleOutList := make([]Validator, 0) @@ -1048,52 +1047,30 @@ func Test_removeLeavingNodesFromValidatorMaps(t *testing.T) { waitingNodesPerShard := 40 nbShards := uint32(2) - tests := []struct { - waitingFixEnabled bool - remainingToRemove int - }{ - { - waitingFixEnabled: false, - remainingToRemove: 18, - }, - { - waitingFixEnabled: true, - remainingToRemove: 20, - }, + leaving := make([]Validator, 0) + + eligibleMap := generateValidatorMap(eligibleNodesPerShard, nbShards) + waitingMap := generateValidatorMap(waitingNodesPerShard, nbShards) + for _, waitingValidators := range waitingMap { + leaving = append(leaving, waitingValidators[:2]...) } - for _, tt := range tests { - t.Run("", func(t *testing.T) { - leaving := make([]Validator, 0) + numToRemove := make(map[uint32]int) - eligibleMap := generateValidatorMap(eligibleNodesPerShard, nbShards) - waitingMap := generateValidatorMap(waitingNodesPerShard, nbShards) - for _, waitingValidators := range waitingMap { - leaving = append(leaving, waitingValidators[:2]...) 
- } + for shardId := range waitingMap { + numToRemove[shardId] = maxShuffleOutNumber + } + copyEligibleMap := copyValidatorMap(eligibleMap) + copyWaitingMap := copyValidatorMap(waitingMap) - numToRemove := make(map[uint32]int) + _, _, _ = removeLeavingNodesFromValidatorMaps( + copyEligibleMap, + copyWaitingMap, + numToRemove, + leaving) - for shardId := range waitingMap { - numToRemove[shardId] = maxShuffleOutNumber - } - copyEligibleMap := copyValidatorMap(eligibleMap) - copyWaitingMap := copyValidatorMap(waitingMap) - - _, _, _ = removeLeavingNodesFromValidatorMaps( - copyEligibleMap, - copyWaitingMap, - numToRemove, - leaving, - eligibleNodesPerShard, - eligibleNodesPerShard, - tt.waitingFixEnabled, - ) - - for _, remainingToRemove := range numToRemove { - require.Equal(t, tt.remainingToRemove, remainingToRemove) - } - }) + for _, remainingToRemove := range numToRemove { + require.Equal(t, 18, remainingToRemove) } } @@ -1188,15 +1165,17 @@ func TestRandHashShuffler_UpdateParams(t *testing.T) { require.Nil(t, err) shuffler2 := &randHashShuffler{ - nodesShard: 200, - nodesMeta: 200, - shardHysteresis: 0, - metaHysteresis: 0, - adaptivity: true, - shuffleBetweenShards: true, - validatorDistributor: &CrossShardValidatorDistributor{}, - availableNodesConfigs: nil, - enableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + nodesShard: 200, + nodesMeta: 200, + shardHysteresis: 0, + metaHysteresis: 0, + adaptivity: true, + shuffleBetweenShards: true, + validatorDistributor: &CrossShardValidatorDistributor{}, + availableNodesConfigs: nil, + stakingV4Step2EnableEpoch: 443, + stakingV4Step3EnableEpoch: 444, + enableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler.UpdateParams( @@ -1300,12 +1279,6 @@ func TestRandHashShuffler_UpdateNodeListsWaitingListFixDisabled(t *testing.T) { testUpdateNodesAndCheckNumLeaving(t, true) } -func TestRandHashShuffler_UpdateNodeListsWithWaitingListFixEnabled(t *testing.T) { - t.Parallel() - - testUpdateNodesAndCheckNumLeaving(t, false) -} - func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { eligiblePerShard := 400 eligibleMeta := 10 @@ -1317,11 +1290,6 @@ func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { numNodesToShuffle := 80 - waitingListFixEnableEpoch := 0 - if beforeFix { - waitingListFixEnableEpoch = 9999 - } - shufflerArgs := &NodesShufflerArgs{ NodesShard: uint32(eligiblePerShard), NodesMeta: uint32(eligibleMeta), @@ -1335,14 +1303,7 @@ func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { NodesToShufflePerShard: uint32(numNodesToShuffle), }, }, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { - if flag == common.WaitingListFixFlag { - return epoch >= uint32(waitingListFixEnableEpoch) - } - return false - }, - }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -1371,34 +1332,15 @@ func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { } } -func TestRandHashShuffler_UpdateNodeListsWaitingListWithFixCheckWaitingDisabled(t *testing.T) { - t.Parallel() - - testUpdateNodeListsAndCheckWaitingList(t, true) -} - -func TestRandHashShuffler_UpdateNodeListsWaitingListWithFixCheckWaitingEnabled(t *testing.T) { - t.Parallel() - - testUpdateNodeListsAndCheckWaitingList(t, false) -} - -func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { +func 
TestRandHashShuffler_UpdateNodeListsAndCheckWaitingList(t *testing.T) { eligiblePerShard := 400 eligibleMeta := 10 waitingPerShard := 400 nbShards := 1 - numLeaving := 2 - numNodesToShuffle := 80 - waitingListFixEnableEpoch := 0 - if beforeFix { - waitingListFixEnableEpoch = 9999 - } - shufflerArgs := &NodesShufflerArgs{ NodesShard: uint32(eligiblePerShard), NodesMeta: uint32(eligibleMeta), @@ -1412,14 +1354,7 @@ func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { NodesToShufflePerShard: uint32(numNodesToShuffle), }, }, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { - if flag == common.WaitingListFixFlag { - return epoch >= uint32(waitingListFixEnableEpoch) - } - return false - }, - }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -1453,9 +1388,7 @@ func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { } expectedNumWaitingMovedToEligible := numNodesToShuffle - if beforeFix { - expectedNumWaitingMovedToEligible -= numLeaving - } + expectedNumWaitingMovedToEligible -= numLeaving assert.Equal(t, expectedNumWaitingMovedToEligible, numWaitingListToEligible) } @@ -1763,10 +1696,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_FromEligible(t *te eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) assert.Equal(t, eligiblePerShard-1, len(newEligible[core.MetachainShardId])) assert.Equal(t, waitingPerShard, len(newWaiting[core.MetachainShardId])) @@ -1804,10 +1734,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_FromWaiting(t *tes eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) assert.Equal(t, eligiblePerShard, len(newEligible[core.MetachainShardId])) assert.Equal(t, waitingPerShard-1, len(newWaiting[core.MetachainShardId])) @@ -1843,10 +1770,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_NonExisting(t *tes eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) assert.Equal(t, eligiblePerShard, len(newEligible[core.MetachainShardId])) assert.Equal(t, waitingPerShard, len(newWaiting[core.MetachainShardId])) @@ -1889,10 +1813,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_2Eligible2Waiting2 eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) remainingInEligible := eligiblePerShard - 2 remainingInWaiting := waitingPerShard - 2 @@ -1949,10 +1870,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_2FromEligible2From eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) // removed first 2 from waiting and just one from eligible remainingInEligible := eligiblePerShard - 1 @@ -2403,8 +2321,11 @@ func TestRandHashShuffler_UpdateNodeLists_All(t *testing.T) { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := 
NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) @@ -2509,6 +2430,7 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_NoWaiting(t *testing.T) { ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{StakingV4Step3EnableEpoch: stakingV4Epoch}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -2570,6 +2492,7 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_NilOrEmptyWaiting(t *test ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{StakingV4Step3EnableEpoch: stakingV4Epoch}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) @@ -2642,6 +2565,57 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting(t *testing.T) assert.Equal(t, previousNumberOfNodes, currentNumberOfNodes) } +func TestRandHashShuffler_UpdateNodeLists_WithStakingV4(t *testing.T) { + t.Parallel() + + numEligiblePerShard := 100 + numWaitingPerShard := 30 + numAuction := 40 + nbShards := uint32(2) + + eligibleMap := generateValidatorMap(numEligiblePerShard, nbShards) + waitingMap := generateValidatorMap(numWaitingPerShard, nbShards) + auctionList := generateValidatorList(numAuction) + + args := ArgsUpdateNodes{ + Eligible: eligibleMap, + Waiting: waitingMap, + UnStakeLeaving: make([]Validator, 0), + AdditionalLeaving: make([]Validator, 0), + Rand: generateRandomByteArray(32), + Auction: auctionList, + NbShards: nbShards, + Epoch: stakingV4Epoch, + } + + shuffler, _ := createHashShufflerIntraShards() + resUpdateNodeList, err := shuffler.UpdateNodeLists(args) + require.Nil(t, err) + + for _, auctionNode := range args.Auction { + found, _ := searchInMap(resUpdateNodeList.Waiting, auctionNode.PubKey()) + assert.True(t, found) + } + + allShuffledOut := getValidatorsInMap(resUpdateNodeList.ShuffledOut) + for _, shuffledOut := range allShuffledOut { + found, _ := searchInMap(args.Eligible, shuffledOut.PubKey()) + assert.True(t, found) + } + + allNewEligible := getValidatorsInMap(resUpdateNodeList.Eligible) + allNewWaiting := getValidatorsInMap(resUpdateNodeList.Waiting) + + previousNumberOfNodes := (numEligiblePerShard+numWaitingPerShard)*(int(nbShards)+1) + numAuction + currentNumberOfNodes := len(allNewEligible) + len(allNewWaiting) + len(allShuffledOut) + assert.Equal(t, previousNumberOfNodes, currentNumberOfNodes) + + args.NewNodes = generateValidatorList(100 * (int(nbShards) + 1)) + resUpdateNodeList, err = shuffler.UpdateNodeLists(args) + require.ErrorIs(t, err, epochStart.ErrReceivedNewListNodeInStakingV4) + require.Nil(t, resUpdateNodeList) +} + func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting_WithLeaving(t *testing.T) { t.Parallel() @@ -2699,8 +2673,11 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting_WithLeaving(t Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go index b591e94e3e2..4898f018010 100644 --- 
a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go @@ -15,11 +15,12 @@ import ( "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" - logger "github.com/multiversx/mx-chain-logger-go" ) var _ NodesCoordinator = (*indexHashedNodesCoordinator)(nil) @@ -58,45 +59,50 @@ func (v validatorList) Less(i, j int) bool { // TODO: add a parameter for shardID when acting as observer type epochNodesConfig struct { - nbShards uint32 - shardID uint32 - eligibleMap map[uint32][]Validator - waitingMap map[uint32][]Validator - selectors map[uint32]RandomSelector - leavingMap map[uint32][]Validator - newList []Validator - mutNodesMaps sync.RWMutex + nbShards uint32 + shardID uint32 + eligibleMap map[uint32][]Validator + waitingMap map[uint32][]Validator + selectors map[uint32]RandomSelector + leavingMap map[uint32][]Validator + shuffledOutMap map[uint32][]Validator + newList []Validator + auctionList []Validator + mutNodesMaps sync.RWMutex + lowWaitingList bool } type indexHashedNodesCoordinator struct { - shardIDAsObserver uint32 - currentEpoch uint32 - shardConsensusGroupSize int - metaConsensusGroupSize int - numTotalEligible uint64 - selfPubKey []byte - savedStateKey []byte - marshalizer marshal.Marshalizer - hasher hashing.Hasher - shuffler NodesShuffler - epochStartRegistrationHandler EpochStartEventNotifier - bootStorer storage.Storer - nodesConfig map[uint32]*epochNodesConfig - mutNodesConfig sync.RWMutex - mutSavedStateKey sync.RWMutex - nodesCoordinatorHelper NodesCoordinatorHelper - consensusGroupCacher Cacher - loadingFromDisk atomic.Value - shuffledOutHandler ShuffledOutHandler - startEpoch uint32 - publicKeyToValidatorMap map[string]*validatorWithShardID - isFullArchive bool - chanStopNode chan endProcess.ArgEndProcess - flagWaitingListFix atomicFlags.Flag - nodeTypeProvider NodeTypeProviderHandler - enableEpochsHandler common.EnableEpochsHandler - validatorInfoCacher epochStart.ValidatorInfoCacher - genesisNodesSetupHandler GenesisNodesSetupHandler + shardIDAsObserver uint32 + currentEpoch uint32 + shardConsensusGroupSize int + metaConsensusGroupSize int + numTotalEligible uint64 + selfPubKey []byte + savedStateKey []byte + marshalizer marshal.Marshalizer + hasher hashing.Hasher + shuffler NodesShuffler + epochStartRegistrationHandler EpochStartEventNotifier + bootStorer storage.Storer + nodesConfig map[uint32]*epochNodesConfig + mutNodesConfig sync.RWMutex + mutSavedStateKey sync.RWMutex + nodesCoordinatorHelper NodesCoordinatorHelper + consensusGroupCacher Cacher + loadingFromDisk atomic.Value + shuffledOutHandler ShuffledOutHandler + startEpoch uint32 + publicKeyToValidatorMap map[string]*validatorWithShardID + isFullArchive bool + chanStopNode chan endProcess.ArgEndProcess + nodeTypeProvider NodeTypeProviderHandler + enableEpochsHandler common.EnableEpochsHandler + validatorInfoCacher epochStart.ValidatorInfoCacher + genesisNodesSetupHandler GenesisNodesSetupHandler + flagStakingV4Step2 atomicFlags.Flag + nodesCoordinatorRegistryFactory 
NodesCoordinatorRegistryFactory + flagStakingV4Started atomicFlags.Flag } // NewIndexHashedNodesCoordinator creates a new index hashed group selector @@ -109,52 +115,57 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed nodesConfig := make(map[uint32]*epochNodesConfig, nodesCoordinatorStoredEpochs) nodesConfig[arguments.Epoch] = &epochNodesConfig{ - nbShards: arguments.NbShards, - shardID: arguments.ShardIDAsObserver, - eligibleMap: make(map[uint32][]Validator), - waitingMap: make(map[uint32][]Validator), - selectors: make(map[uint32]RandomSelector), - leavingMap: make(map[uint32][]Validator), - newList: make([]Validator, 0), - } - + nbShards: arguments.NbShards, + shardID: arguments.ShardIDAsObserver, + eligibleMap: make(map[uint32][]Validator), + waitingMap: make(map[uint32][]Validator), + selectors: make(map[uint32]RandomSelector), + leavingMap: make(map[uint32][]Validator), + shuffledOutMap: make(map[uint32][]Validator), + newList: make([]Validator, 0), + auctionList: make([]Validator, 0), + lowWaitingList: false, + } + + // todo: if not genesis, use previous randomness from start of epoch meta block savedKey := arguments.Hasher.Compute(string(arguments.SelfPublicKey)) ihnc := &indexHashedNodesCoordinator{ - marshalizer: arguments.Marshalizer, - hasher: arguments.Hasher, - shuffler: arguments.Shuffler, - epochStartRegistrationHandler: arguments.EpochStartNotifier, - bootStorer: arguments.BootStorer, - selfPubKey: arguments.SelfPublicKey, - nodesConfig: nodesConfig, - currentEpoch: arguments.Epoch, - savedStateKey: savedKey, - shardConsensusGroupSize: arguments.ShardConsensusGroupSize, - metaConsensusGroupSize: arguments.MetaConsensusGroupSize, - consensusGroupCacher: arguments.ConsensusGroupCache, - shardIDAsObserver: arguments.ShardIDAsObserver, - shuffledOutHandler: arguments.ShuffledOutHandler, - startEpoch: arguments.StartEpoch, - publicKeyToValidatorMap: make(map[string]*validatorWithShardID), - chanStopNode: arguments.ChanStopNode, - nodeTypeProvider: arguments.NodeTypeProvider, - isFullArchive: arguments.IsFullArchive, - enableEpochsHandler: arguments.EnableEpochsHandler, - validatorInfoCacher: arguments.ValidatorInfoCacher, - genesisNodesSetupHandler: arguments.GenesisNodesSetupHandler, + marshalizer: arguments.Marshalizer, + hasher: arguments.Hasher, + shuffler: arguments.Shuffler, + epochStartRegistrationHandler: arguments.EpochStartNotifier, + bootStorer: arguments.BootStorer, + selfPubKey: arguments.SelfPublicKey, + nodesConfig: nodesConfig, + currentEpoch: arguments.Epoch, + savedStateKey: savedKey, + shardConsensusGroupSize: arguments.ShardConsensusGroupSize, + metaConsensusGroupSize: arguments.MetaConsensusGroupSize, + consensusGroupCacher: arguments.ConsensusGroupCache, + shardIDAsObserver: arguments.ShardIDAsObserver, + shuffledOutHandler: arguments.ShuffledOutHandler, + startEpoch: arguments.StartEpoch, + publicKeyToValidatorMap: make(map[string]*validatorWithShardID), + chanStopNode: arguments.ChanStopNode, + nodeTypeProvider: arguments.NodeTypeProvider, + isFullArchive: arguments.IsFullArchive, + enableEpochsHandler: arguments.EnableEpochsHandler, + validatorInfoCacher: arguments.ValidatorInfoCacher, + genesisNodesSetupHandler: arguments.GenesisNodesSetupHandler, + nodesCoordinatorRegistryFactory: arguments.NodesCoordinatorRegistryFactory, } ihnc.loadingFromDisk.Store(false) ihnc.nodesCoordinatorHelper = ihnc - err = ihnc.setNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, nil, arguments.Epoch) + err = 
ihnc.setNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, nil, nil, arguments.Epoch, false) if err != nil { return nil, err } ihnc.fillPublicKeyToValidatorMap() - err = ihnc.saveState(ihnc.savedStateKey) + err = ihnc.saveState(ihnc.savedStateKey, arguments.Epoch) if err != nil { log.Error("saving initial nodes coordinator config failed", "error", err.Error()) @@ -175,6 +186,7 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed currentConfig.waitingMap, currentConfig.leavingMap, make(map[uint32][]Validator), + currentConfig.shuffledOutMap, currentConfig.nbShards) ihnc.epochStartRegistrationHandler.RegisterHandler(ihnc) @@ -216,6 +228,9 @@ func checkArguments(arguments ArgNodesCoordinator) error { if check.IfNil(arguments.NodeTypeProvider) { return ErrNilNodeTypeProvider } + if check.IfNil(arguments.NodesCoordinatorRegistryFactory) { + return ErrNilNodesCoordinatorRegistryFactory + } if nil == arguments.ChanStopNode { return ErrNilNodeStopChannel } @@ -224,7 +239,6 @@ func checkArguments(arguments ArgNodesCoordinator) error { } err := core.CheckHandlerCompatibility(arguments.EnableEpochsHandler, []core.EnableEpochFlag{ common.RefactorPeersMiniBlocksFlag, - common.WaitingListFixFlag, }) if err != nil { return err @@ -244,7 +258,9 @@ func (ihnc *indexHashedNodesCoordinator) setNodesPerShards( eligible map[uint32][]Validator, waiting map[uint32][]Validator, leaving map[uint32][]Validator, + shuffledOut map[uint32][]Validator, epoch uint32, + lowWaitingList bool, ) error { ihnc.mutNodesConfig.Lock() defer ihnc.mutNodesConfig.Unlock() @@ -283,6 +299,8 @@ func (ihnc *indexHashedNodesCoordinator) setNodesPerShards( nodesConfig.eligibleMap = eligible nodesConfig.waitingMap = waiting nodesConfig.leavingMap = leaving + nodesConfig.shuffledOutMap = shuffledOut + nodesConfig.lowWaitingList = lowWaitingList nodesConfig.shardID, isCurrentNodeValidator = ihnc.computeShardForSelfPublicKey(nodesConfig) nodesConfig.selectors, err = ihnc.createSelectors(nodesConfig) if err != nil { @@ -509,6 +527,50 @@ func (ihnc *indexHashedNodesCoordinator) GetAllLeavingValidatorsPublicKeys(epoch return validatorsPubKeys, nil } +// GetAllShuffledOutValidatorsPublicKeys will return all shuffled out validator public keys from all shards +func (ihnc *indexHashedNodesCoordinator) GetAllShuffledOutValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + validatorsPubKeys := make(map[uint32][][]byte) + + ihnc.mutNodesConfig.RLock() + nodesConfig, ok := ihnc.nodesConfig[epoch] + ihnc.mutNodesConfig.RUnlock() + + if !ok { + return nil, fmt.Errorf("%w epoch=%v", ErrEpochNodesConfigDoesNotExist, epoch) + } + + nodesConfig.mutNodesMaps.RLock() + defer nodesConfig.mutNodesMaps.RUnlock() + + for shardID, shuffledOutList := range nodesConfig.shuffledOutMap { + for _, shuffledOutValidator := range shuffledOutList { + validatorsPubKeys[shardID] = append(validatorsPubKeys[shardID], shuffledOutValidator.PubKey()) + } + } + + return validatorsPubKeys, nil +} + +// GetShuffledOutToAuctionValidatorsPublicKeys will return shuffled out to auction validators public keys +func (ihnc *indexHashedNodesCoordinator) GetShuffledOutToAuctionValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + validatorsPubKeys := make(map[uint32][][]byte) + + ihnc.mutNodesConfig.RLock() + nodesConfig, ok := ihnc.nodesConfig[epoch] + ihnc.mutNodesConfig.RUnlock() + + if !ok { + return nil, fmt.Errorf("%w epoch=%v", ErrEpochNodesConfigDoesNotExist, epoch) + } + + if nodesConfig.lowWaitingList { + // in 
case of low waiting list the nodes do not go through auction but directly to waiting + return validatorsPubKeys, nil + } + + return ihnc.GetAllShuffledOutValidatorsPublicKeys(epoch) +} + // GetValidatorsIndexes will return validators indexes for a block func (ihnc *indexHashedNodesCoordinator) GetValidatorsIndexes( publicKeys []string, @@ -563,7 +625,8 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa return } - if _, ok := metaHdr.(*block.MetaBlock); !ok { + _, castOk := metaHdr.(*block.MetaBlock) + if !castOk { log.Error("could not process EpochStartPrepare on nodesCoordinator - not metaBlock") return } @@ -584,37 +647,13 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa return } - ihnc.mutNodesConfig.RLock() - previousConfig := ihnc.nodesConfig[ihnc.currentEpoch] - if previousConfig == nil { - log.Error("previous nodes config is nil") - ihnc.mutNodesConfig.RUnlock() - return - } - - // TODO: remove the copy if no changes are done to the maps - copiedPrevious := &epochNodesConfig{} - copiedPrevious.eligibleMap = copyValidatorMap(previousConfig.eligibleMap) - copiedPrevious.waitingMap = copyValidatorMap(previousConfig.waitingMap) - copiedPrevious.nbShards = previousConfig.nbShards - - ihnc.mutNodesConfig.RUnlock() - // TODO: compare with previous nodesConfig if exists - newNodesConfig, err := ihnc.computeNodesConfigFromList(copiedPrevious, allValidatorInfo) + newNodesConfig, err := ihnc.computeNodesConfigFromList(allValidatorInfo) if err != nil { log.Error("could not compute nodes config from list - do nothing on nodesCoordinator epochStartPrepare") return } - if copiedPrevious.nbShards != newNodesConfig.nbShards { - log.Warn("number of shards does not match", - "previous epoch", ihnc.currentEpoch, - "previous number of shards", copiedPrevious.nbShards, - "new epoch", newEpoch, - "new number of shards", newNodesConfig.nbShards) - } - additionalLeavingMap, err := ihnc.nodesCoordinatorHelper.ComputeAdditionalLeaving(allValidatorInfo) if err != nil { log.Error("could not compute additionalLeaving Nodes - do nothing on nodesCoordinator epochStartPrepare") @@ -628,6 +667,7 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa Eligible: newNodesConfig.eligibleMap, Waiting: newNodesConfig.waitingMap, NewNodes: newNodesConfig.newList, + Auction: newNodesConfig.auctionList, UnStakeLeaving: unStakeLeavingList, AdditionalLeaving: additionalLeavingList, Rand: randomness, @@ -647,13 +687,13 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa resUpdateNodes.Leaving, ) - err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, newEpoch) + err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, resUpdateNodes.ShuffledOut, newEpoch, resUpdateNodes.LowWaitingList) if err != nil { log.Error("set nodes per shard failed", "error", err.Error()) } ihnc.fillPublicKeyToValidatorMap() - err = ihnc.saveState(randomness) + err = ihnc.saveState(randomness, newEpoch) ihnc.handleErrorLog(err, "saving nodes coordinator config failed") displayNodesConfiguration( @@ -661,6 +701,7 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa resUpdateNodes.Waiting, leavingNodesMap, stillRemainingNodesMap, + resUpdateNodes.ShuffledOut, newNodesConfig.nbShards) ihnc.mutSavedStateKey.Lock() @@ -714,18 +755,13 @@ func (ihnc *indexHashedNodesCoordinator) GetChance(_ uint32) uint32 { } func (ihnc 
*indexHashedNodesCoordinator) computeNodesConfigFromList( - previousEpochConfig *epochNodesConfig, validatorInfos []*state.ShardValidatorInfo, ) (*epochNodesConfig, error) { eligibleMap := make(map[uint32][]Validator) waitingMap := make(map[uint32][]Validator) leavingMap := make(map[uint32][]Validator) newNodesList := make([]Validator, 0) - - if ihnc.flagWaitingListFix.IsSet() && previousEpochConfig == nil { - return nil, ErrNilPreviousEpochConfig - } - + auctionList := make([]Validator, 0) if len(validatorInfos) == 0 { log.Warn("computeNodesConfigFromList - validatorInfos len is 0") } @@ -743,25 +779,41 @@ func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( case string(common.EligibleList): eligibleMap[validatorInfo.ShardId] = append(eligibleMap[validatorInfo.ShardId], currentValidator) case string(common.LeavingList): - log.Debug("leaving node validatorInfo", "pk", validatorInfo.PublicKey) + log.Debug("leaving node validatorInfo", + "pk", validatorInfo.PublicKey, + "previous list", validatorInfo.PreviousList, + "current index", validatorInfo.Index, + "previous index", validatorInfo.PreviousIndex, + "shardId", validatorInfo.ShardId) leavingMap[validatorInfo.ShardId] = append(leavingMap[validatorInfo.ShardId], currentValidator) ihnc.addValidatorToPreviousMap( - previousEpochConfig, eligibleMap, waitingMap, currentValidator, - validatorInfo.ShardId) + validatorInfo, + ) case string(common.NewList): + if ihnc.flagStakingV4Step2.IsSet() { + return nil, epochStart.ErrReceivedNewListNodeInStakingV4 + } log.Debug("new node registered", "pk", validatorInfo.PublicKey) newNodesList = append(newNodesList, currentValidator) case string(common.InactiveList): log.Debug("inactive validator", "pk", validatorInfo.PublicKey) case string(common.JailedList): log.Debug("jailed validator", "pk", validatorInfo.PublicKey) + case string(common.SelectedFromAuctionList): + log.Debug("selected node from auction", "pk", validatorInfo.PublicKey) + if ihnc.flagStakingV4Step2.IsSet() { + auctionList = append(auctionList, currentValidator) + } else { + return nil, ErrReceivedAuctionValidatorsBeforeStakingV4 + } } } sort.Sort(validatorList(newNodesList)) + sort.Sort(validatorList(auctionList)) for _, eligibleList := range eligibleMap { sort.Sort(validatorList(eligibleList)) } @@ -783,6 +835,7 @@ func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( waitingMap: waitingMap, leavingMap: leavingMap, newList: newNodesList, + auctionList: auctionList, nbShards: uint32(nbShards), } @@ -790,30 +843,49 @@ func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( } func (ihnc *indexHashedNodesCoordinator) addValidatorToPreviousMap( - previousEpochConfig *epochNodesConfig, eligibleMap map[uint32][]Validator, waitingMap map[uint32][]Validator, currentValidator *validator, - currentValidatorShardId uint32) { - - if !ihnc.flagWaitingListFix.IsSet() { - eligibleMap[currentValidatorShardId] = append(eligibleMap[currentValidatorShardId], currentValidator) + validatorInfo *state.ShardValidatorInfo, +) { + shardId := validatorInfo.ShardId + previousList := validatorInfo.PreviousList + + log.Debug("checking leaving node", + "current list", validatorInfo.List, + "previous list", previousList, + "current index", validatorInfo.Index, + "previous index", validatorInfo.PreviousIndex, + "pk", currentValidator.PubKey(), + "shardId", shardId) + + if !ihnc.flagStakingV4Started.IsSet() || len(previousList) == 0 { + log.Debug("leaving node before staking v4 or with not previous list set node found in", + 
"list", "eligible", "shardId", shardId, "previous list", previousList) + eligibleMap[shardId] = append(eligibleMap[shardId], currentValidator) return } - found, shardId := searchInMap(previousEpochConfig.eligibleMap, currentValidator.PubKey()) - if found { + if previousList == string(common.EligibleList) { log.Debug("leaving node found in", "list", "eligible", "shardId", shardId) - eligibleMap[shardId] = append(eligibleMap[currentValidatorShardId], currentValidator) + currentValidator.index = validatorInfo.PreviousIndex + eligibleMap[shardId] = append(eligibleMap[shardId], currentValidator) return } - found, shardId = searchInMap(previousEpochConfig.waitingMap, currentValidator.PubKey()) - if found { + if previousList == string(common.WaitingList) { log.Debug("leaving node found in", "list", "waiting", "shardId", shardId) - waitingMap[shardId] = append(waitingMap[currentValidatorShardId], currentValidator) + currentValidator.index = validatorInfo.PreviousIndex + waitingMap[shardId] = append(waitingMap[shardId], currentValidator) return } + + log.Debug("leaving node not found in eligible or waiting", + "previous list", previousList, + "current index", validatorInfo.Index, + "previous index", validatorInfo.PreviousIndex, + "pk", currentValidator.PubKey(), + "shardId", shardId) } func (ihnc *indexHashedNodesCoordinator) handleErrorLog(err error, message string) { @@ -837,7 +909,7 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartAction(hdr data.HeaderHandler needToRemove := epochToRemove >= 0 ihnc.currentEpoch = newEpoch - err := ihnc.saveState(ihnc.savedStateKey) + err := ihnc.saveState(ihnc.savedStateKey, newEpoch) ihnc.handleErrorLog(err, "saving nodes coordinator config failed") ihnc.mutNodesConfig.Lock() @@ -1044,6 +1116,18 @@ func (ihnc *indexHashedNodesCoordinator) computeShardForSelfPublicKey(nodesConfi return shardId, true } + if ihnc.flagStakingV4Step2.IsSet() { + found, shardId = searchInMap(nodesConfig.shuffledOutMap, pubKey) + if found { + log.Trace("computeShardForSelfPublicKey found validator in shuffled out", + "epoch", ihnc.currentEpoch, + "shard", shardId, + "validator PK", pubKey, + ) + return shardId, true + } + } + log.Trace("computeShardForSelfPublicKey returned default", "shard", selfShard, ) @@ -1241,8 +1325,11 @@ func (ihnc *indexHashedNodesCoordinator) getShardValidatorInfoData(txHash []byte } func (ihnc *indexHashedNodesCoordinator) updateEpochFlags(epoch uint32) { - ihnc.flagWaitingListFix.SetValue(epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.WaitingListFixFlag)) - log.Debug("indexHashedNodesCoordinator: waiting list fix", "enabled", ihnc.flagWaitingListFix.IsSet()) + ihnc.flagStakingV4Started.SetValue(epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.StakingV4Step1Flag)) + log.Debug("indexHashedNodesCoordinator: flagStakingV4Started", "enabled", ihnc.flagStakingV4Started.IsSet()) + + ihnc.flagStakingV4Step2.SetValue(epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.StakingV4Step2Flag)) + log.Debug("indexHashedNodesCoordinator: flagStakingV4Step2", "enabled", ihnc.flagStakingV4Step2.IsSet()) } // GetWaitingEpochsLeftForPublicKey returns the number of epochs left for the public key until it becomes eligible diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go index bb96c6ec15a..b5b87781a73 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go @@ -6,7 +6,7 @@ 
import (
 // SetNodesConfigFromValidatorsInfo sets epoch config based on validators list configuration
 func (ihnc *indexHashedNodesCoordinator) SetNodesConfigFromValidatorsInfo(epoch uint32, randomness []byte, validatorsInfo []*state.ShardValidatorInfo) error {
-	newNodesConfig, err := ihnc.computeNodesConfigFromList(&epochNodesConfig{}, validatorsInfo)
+	newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorsInfo)
 	if err != nil {
 		return err
 	}
@@ -41,7 +41,7 @@ func (ihnc *indexHashedNodesCoordinator) SetNodesConfigFromValidatorsInfo(epoch
 		resUpdateNodes.Leaving,
 	)
-	err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, epoch)
+	err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, resUpdateNodes.ShuffledOut, epoch, resUpdateNodes.LowWaitingList)
 	if err != nil {
 		return err
 	}
diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go
index 40f9995febe..55cbb326753 100644
--- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go
+++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go
@@ -1,35 +1,12 @@
 package nodesCoordinator
 
 import (
-	"encoding/json"
 	"fmt"
 	"strconv"
 
 	"github.com/multiversx/mx-chain-go/common"
 )
 
-// SerializableValidator holds the minimal data required for marshalling and un-marshalling a validator
-type SerializableValidator struct {
-	PubKey  []byte `json:"pubKey"`
-	Chances uint32 `json:"chances"`
-	Index   uint32 `json:"index"`
-}
-
-// EpochValidators holds one epoch configuration for a nodes coordinator
-type EpochValidators struct {
-	EligibleValidators map[string][]*SerializableValidator `json:"eligibleValidators"`
-	WaitingValidators  map[string][]*SerializableValidator `json:"waitingValidators"`
-	LeavingValidators  map[string][]*SerializableValidator `json:"leavingValidators"`
-}
-
-// NodesCoordinatorRegistry holds the data that can be used to initialize a nodes coordinator
-type NodesCoordinatorRegistry struct {
-	EpochsConfig map[string]*EpochValidators `json:"epochConfigs"`
-	CurrentEpoch uint32                      `json:"currentEpoch"`
-}
-
-// TODO: add proto marshalizer for these package - replace all json marshalizers
-
 // LoadState loads the nodes coordinator state from the used boot storage
 func (ihnc *indexHashedNodesCoordinator) LoadState(key []byte) error {
 	return ihnc.baseLoadState(key)
@@ -48,8 +25,7 @@ func (ihnc *indexHashedNodesCoordinator) baseLoadState(key []byte) error {
 		return err
 	}
 
-	config := &NodesCoordinatorRegistry{}
-	err = json.Unmarshal(data, config)
+	config, err := ihnc.nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry(data)
 	if err != nil {
 		return err
 	}
@@ -58,8 +34,8 @@ func (ihnc *indexHashedNodesCoordinator) baseLoadState(key []byte) error {
 	ihnc.savedStateKey = key
 	ihnc.mutSavedStateKey.Unlock()
 
-	ihnc.currentEpoch = config.CurrentEpoch
-	log.Debug("loaded nodes config", "current epoch", config.CurrentEpoch)
+	ihnc.currentEpoch = config.GetCurrentEpoch()
+	log.Debug("loaded nodes config", "current epoch", config.GetCurrentEpoch())
 
 	nodesConfig, err := ihnc.registryToNodesCoordinator(config)
 	if err != nil {
@@ -83,22 +59,31 @@ func displayNodesConfigInfo(config map[uint32]*epochNodesConfig) {
 	}
 }
 
-func (ihnc *indexHashedNodesCoordinator) saveState(key []byte) error {
-	registry := ihnc.NodesCoordinatorToRegistry()
-	data, err := json.Marshal(registry)
+func (ihnc *indexHashedNodesCoordinator) saveState(key []byte, epoch uint32) error {
+	registry := ihnc.NodesCoordinatorToRegistry(epoch)
+	data, err := ihnc.nodesCoordinatorRegistryFactory.GetRegistryData(registry, epoch)
 	if err != nil {
 		return err
 	}
 
-	ncInternalkey := append([]byte(common.NodesCoordinatorRegistryKeyPrefix), key...)
-
-	log.Debug("saving nodes coordinator config", "key", ncInternalkey)
+	ncInternalKey := append([]byte(common.NodesCoordinatorRegistryKeyPrefix), key...)
+	log.Debug("saving nodes coordinator config", "key", ncInternalKey, "epoch", epoch)
 
-	return ihnc.bootStorer.Put(ncInternalkey, data)
+	return ihnc.bootStorer.Put(ncInternalKey, data)
 }
 
 // NodesCoordinatorToRegistry will export the nodesCoordinator data to the registry
-func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry() *NodesCoordinatorRegistry {
+func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry(epoch uint32) NodesCoordinatorRegistryHandler {
+	if epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.StakingV4Step2Flag) {
+		log.Debug("indexHashedNodesCoordinator.NodesCoordinatorToRegistry called with auction registry", "epoch", epoch)
+		return ihnc.nodesCoordinatorToRegistryWithAuction()
+	}
+
+	log.Debug("indexHashedNodesCoordinator.NodesCoordinatorToRegistry called with old registry", "epoch", epoch)
+	return ihnc.nodesCoordinatorToOldRegistry()
+}
+
+func (ihnc *indexHashedNodesCoordinator) nodesCoordinatorToOldRegistry() NodesCoordinatorRegistryHandler {
 	ihnc.mutNodesConfig.RLock()
 	defer ihnc.mutNodesConfig.RUnlock()
 
@@ -107,13 +92,8 @@ func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry() *NodesCoor
 		EpochsConfig: make(map[string]*EpochValidators),
 	}
 
-	minEpoch := 0
-	lastEpoch := ihnc.getLastEpochConfig()
-	if lastEpoch >= nodesCoordinatorStoredEpochs {
-		minEpoch = int(lastEpoch) - nodesCoordinatorStoredEpochs + 1
-	}
-
-	for epoch := uint32(minEpoch); epoch <= lastEpoch; epoch++ {
+	minEpoch, lastEpoch := ihnc.getMinAndLastEpoch()
+	for epoch := minEpoch; epoch <= lastEpoch; epoch++ {
 		epochNodesData, ok := ihnc.nodesConfig[epoch]
 		if !ok {
 			continue
@@ -125,6 +105,16 @@ func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry() *NodesCoor
 	return registry
 }
 
+func (ihnc *indexHashedNodesCoordinator) getMinAndLastEpoch() (uint32, uint32) {
+	minEpoch := 0
+	lastEpoch := ihnc.getLastEpochConfig()
+	if lastEpoch >= nodesCoordinatorStoredEpochs {
+		minEpoch = int(lastEpoch) - nodesCoordinatorStoredEpochs + 1
+	}
+
+	return uint32(minEpoch), lastEpoch
+}
+
 func (ihnc *indexHashedNodesCoordinator) getLastEpochConfig() uint32 {
 	lastEpoch := uint32(0)
 	for epoch := range ihnc.nodesConfig {
@@ -137,13 +127,13 @@ func (ihnc *indexHashedNodesCoordinator) getLastEpochConfig() uint32 {
 }
 
 func (ihnc *indexHashedNodesCoordinator) registryToNodesCoordinator(
-	config *NodesCoordinatorRegistry,
+	config NodesCoordinatorRegistryHandler,
 ) (map[uint32]*epochNodesConfig, error) {
 	var err error
 	var epoch int64
 	result := make(map[uint32]*epochNodesConfig)
 
-	for epochStr, epochValidators := range config.EpochsConfig {
+	for epochStr, epochValidators := range config.GetEpochsConfig() {
 		epoch, err = strconv.ParseInt(epochStr, 10, 64)
 		if err != nil {
 			return nil, err
@@ -197,25 +187,34 @@ func epochNodesConfigToEpochValidators(config *epochNodesConfig) *EpochValidator
 	return result
 }
 
-func epochValidatorsToEpochNodesConfig(config *EpochValidators) (*epochNodesConfig, error) {
+func epochValidatorsToEpochNodesConfig(config EpochValidatorsHandler) (*epochNodesConfig, error) {
 	result := &epochNodesConfig{}
 	var err error
 
-	result.eligibleMap, err = serializableValidatorsMapToValidatorsMap(config.EligibleValidators)
+	result.eligibleMap, err = serializableValidatorsMapToValidatorsMap(config.GetEligibleValidators())
 	if err != nil {
 		return nil, err
 	}
 
-	result.waitingMap, err = serializableValidatorsMapToValidatorsMap(config.WaitingValidators)
+	result.waitingMap, err = serializableValidatorsMapToValidatorsMap(config.GetWaitingValidators())
 	if err != nil {
 		return nil, err
 	}
 
-	result.leavingMap, err = serializableValidatorsMapToValidatorsMap(config.LeavingValidators)
+	result.leavingMap, err = serializableValidatorsMapToValidatorsMap(config.GetLeavingValidators())
 	if err != nil {
 		return nil, err
 	}
 
+	configWithAuction, castOk := config.(EpochValidatorsHandlerWithAuction)
+	if castOk {
+		result.shuffledOutMap, err = serializableValidatorsMapToValidatorsMap(configWithAuction.GetShuffledOutValidators())
+		if err != nil {
+			return nil, err
+		}
+		result.lowWaitingList = configWithAuction.GetLowWaitingList()
+	}
+
 	return result, nil
 }
diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistryWithAuction.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistryWithAuction.go
new file mode 100644
index 00000000000..76deba81eaa
--- /dev/null
+++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistryWithAuction.go
@@ -0,0 +1,56 @@
+package nodesCoordinator
+
+import (
+	"fmt"
+)
+
+// nodesCoordinatorToRegistryWithAuction will export the nodesCoordinator data to the registry which contains auction list
+func (ihnc *indexHashedNodesCoordinator) nodesCoordinatorToRegistryWithAuction() *NodesCoordinatorRegistryWithAuction {
+	ihnc.mutNodesConfig.RLock()
+	defer ihnc.mutNodesConfig.RUnlock()
+
+	registry := &NodesCoordinatorRegistryWithAuction{
+		CurrentEpoch:            ihnc.currentEpoch,
+		EpochsConfigWithAuction: make(map[string]*EpochValidatorsWithAuction),
+	}
+
+	minEpoch, lastEpoch := ihnc.getMinAndLastEpoch()
+	for epoch := minEpoch; epoch <= lastEpoch; epoch++ {
+		epochNodesData, ok := ihnc.nodesConfig[epoch]
+		if !ok {
+			continue
+		}
+
+		registry.EpochsConfigWithAuction[fmt.Sprint(epoch)] = epochNodesConfigToEpochValidatorsWithAuction(epochNodesData)
+	}
+
+	return registry
+}
+
+func epochNodesConfigToEpochValidatorsWithAuction(config *epochNodesConfig) *EpochValidatorsWithAuction {
+	result := &EpochValidatorsWithAuction{
+		Eligible:       make(map[string]Validators, len(config.eligibleMap)),
+		Waiting:        make(map[string]Validators, len(config.waitingMap)),
+		Leaving:        make(map[string]Validators, len(config.leavingMap)),
+		ShuffledOut:    make(map[string]Validators, len(config.shuffledOutMap)),
+		LowWaitingList: config.lowWaitingList,
+	}
+
+	for k, v := range config.eligibleMap {
+		result.Eligible[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)}
+	}
+
+	for k, v := range config.waitingMap {
+		result.Waiting[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)}
+	}
+
+	for k, v := range config.leavingMap {
+		result.Leaving[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)}
+	}
+
+	for k, v := range config.shuffledOutMap {
+		result.ShuffledOut[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)}
+	}
+
+	return result
+}
diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go
index 348c7a74280..0a91ba9170a 100644
--- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go
+++
b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go @@ -6,8 +6,12 @@ import ( "strconv" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" ) func sameValidatorsMaps(map1, map2 map[uint32][]Validator) bool { @@ -73,13 +77,23 @@ func validatorsEqualSerializableValidators(validators []Validator, sValidators [ } func TestIndexHashedNodesCoordinator_LoadStateAfterSave(t *testing.T) { + t.Parallel() + args := createArguments() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.StakingV4Step2Flag { + return stakingV4Epoch + } + return 0 + }, + } nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) expectedConfig := nodesCoordinator.nodesConfig[0] key := []byte("config") - err := nodesCoordinator.saveState(key) + err := nodesCoordinator.saveState(key, 0) assert.Nil(t, err) delete(nodesCoordinator.nodesConfig, 0) @@ -94,26 +108,106 @@ func TestIndexHashedNodesCoordinator_LoadStateAfterSave(t *testing.T) { assert.True(t, sameValidatorsMaps(expectedConfig.waitingMap, actualConfig.waitingMap)) } -func TestIndexHashedNodesCooridinator_nodesCoordinatorToRegistry(t *testing.T) { +func TestIndexHashedNodesCoordinator_LoadStateAfterSaveWithStakingV4(t *testing.T) { + t.Parallel() + + args := createArguments() + args.Epoch = stakingV4Epoch + nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) + + nodesCoordinator.nodesConfig[stakingV4Epoch].leavingMap = createDummyNodesMap(3, 0, string(common.LeavingList)) + nodesCoordinator.nodesConfig[stakingV4Epoch].shuffledOutMap = createDummyNodesMap(3, 0, string(common.SelectedFromAuctionList)) + expectedConfig := nodesCoordinator.nodesConfig[stakingV4Epoch] + + key := []byte("config") + err := nodesCoordinator.saveState(key, stakingV4Epoch) + assert.Nil(t, err) + + delete(nodesCoordinator.nodesConfig, 0) + err = nodesCoordinator.LoadState(key) + assert.Nil(t, err) + + actualConfig := nodesCoordinator.nodesConfig[stakingV4Epoch] + assert.Equal(t, expectedConfig.shardID, actualConfig.shardID) + assert.Equal(t, expectedConfig.nbShards, actualConfig.nbShards) + assert.True(t, sameValidatorsMaps(expectedConfig.eligibleMap, actualConfig.eligibleMap)) + assert.True(t, sameValidatorsMaps(expectedConfig.waitingMap, actualConfig.waitingMap)) + assert.True(t, sameValidatorsMaps(expectedConfig.shuffledOutMap, actualConfig.shuffledOutMap)) + assert.True(t, sameValidatorsMaps(expectedConfig.leavingMap, actualConfig.leavingMap)) +} + +func TestIndexHashedNodesCoordinator_nodesCoordinatorToRegistryWithStakingV4(t *testing.T) { args := createArguments() + args.Epoch = stakingV4Epoch nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) - ncr := nodesCoordinator.NodesCoordinatorToRegistry() + nodesCoordinator.nodesConfig[stakingV4Epoch].leavingMap = createDummyNodesMap(3, 0, string(common.LeavingList)) + nodesCoordinator.nodesConfig[stakingV4Epoch].shuffledOutMap = createDummyNodesMap(3, 0, string(common.SelectedFromAuctionList)) + + ncr := nodesCoordinator.NodesCoordinatorToRegistry(stakingV4Epoch) nc := nodesCoordinator.nodesConfig - assert.Equal(t, nodesCoordinator.currentEpoch, ncr.CurrentEpoch) - assert.Equal(t, 
len(nodesCoordinator.nodesConfig), len(ncr.EpochsConfig)) + assert.Equal(t, nodesCoordinator.currentEpoch, ncr.GetCurrentEpoch()) + assert.Equal(t, len(nodesCoordinator.nodesConfig), len(ncr.GetEpochsConfig())) for epoch, config := range nc { - assert.True(t, sameValidatorsDifferentMapTypes(config.eligibleMap, ncr.EpochsConfig[fmt.Sprint(epoch)].EligibleValidators)) - assert.True(t, sameValidatorsDifferentMapTypes(config.waitingMap, ncr.EpochsConfig[fmt.Sprint(epoch)].WaitingValidators)) + ncrWithAuction := ncr.GetEpochsConfig()[fmt.Sprint(epoch)].(EpochValidatorsHandlerWithAuction) + assert.True(t, sameValidatorsDifferentMapTypes(config.waitingMap, ncrWithAuction.GetWaitingValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.leavingMap, ncrWithAuction.GetLeavingValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.eligibleMap, ncrWithAuction.GetEligibleValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.shuffledOutMap, ncrWithAuction.GetShuffledOutValidators())) + } +} + +func TestIndexHashedNodesCoordinator_nodesCoordinatorToRegistry(t *testing.T) { + args := createArguments() + nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) + + ncr := nodesCoordinator.NodesCoordinatorToRegistry(args.Epoch) + nc := nodesCoordinator.nodesConfig + + assert.Equal(t, nodesCoordinator.currentEpoch, ncr.GetCurrentEpoch()) + assert.Equal(t, len(nodesCoordinator.nodesConfig), len(ncr.GetEpochsConfig())) + + for epoch, config := range nc { + assert.True(t, sameValidatorsDifferentMapTypes(config.eligibleMap, ncr.GetEpochsConfig()[fmt.Sprint(epoch)].GetEligibleValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.waitingMap, ncr.GetEpochsConfig()[fmt.Sprint(epoch)].GetWaitingValidators())) + } +} + +func TestIndexHashedNodesCoordinator_nodesCoordinatorWithAuctionToRegistryAndBack(t *testing.T) { + args := createArguments() + nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) + + nodesConfigForEpoch := nodesCoordinator.nodesConfig[args.Epoch] + nodesConfigForEpoch.shuffledOutMap = createDummyNodesMap(3, 0, string(common.WaitingList)) + nodesConfigForEpoch.lowWaitingList = true + // leave only one epoch config in nc + nodesCoordinator.nodesConfig = make(map[uint32]*epochNodesConfig) + nodesCoordinator.nodesConfig[args.Epoch] = nodesConfigForEpoch + + ncr := nodesCoordinator.nodesCoordinatorToRegistryWithAuction() + require.True(t, sameValidatorsDifferentMapTypes(nodesConfigForEpoch.eligibleMap, ncr.GetEpochsConfig()[fmt.Sprint(args.Epoch)].GetEligibleValidators())) + require.True(t, sameValidatorsDifferentMapTypes(nodesConfigForEpoch.waitingMap, ncr.GetEpochsConfig()[fmt.Sprint(args.Epoch)].GetWaitingValidators())) + require.True(t, sameValidatorsDifferentMapTypes(nodesConfigForEpoch.shuffledOutMap, ncr.GetEpochsConfigWithAuction()[fmt.Sprint(args.Epoch)].GetShuffledOutValidators())) + require.Equal(t, nodesConfigForEpoch.lowWaitingList, ncr.GetEpochsConfigWithAuction()[fmt.Sprint(args.Epoch)].GetLowWaitingList()) + + nodesConfig, err := nodesCoordinator.registryToNodesCoordinator(ncr) + require.Nil(t, err) + + assert.Equal(t, len(nodesCoordinator.nodesConfig), len(nodesConfig)) + for epoch, config := range nodesCoordinator.nodesConfig { + require.True(t, sameValidatorsMaps(config.eligibleMap, nodesConfig[epoch].eligibleMap)) + require.True(t, sameValidatorsMaps(config.waitingMap, nodesConfig[epoch].waitingMap)) + require.True(t, sameValidatorsMaps(config.shuffledOutMap, nodesConfig[epoch].shuffledOutMap)) + require.Equal(t, 
config.lowWaitingList, nodesConfig[epoch].lowWaitingList) } } func TestIndexHashedNodesCoordinator_registryToNodesCoordinator(t *testing.T) { args := createArguments() nodesCoordinator1, _ := NewIndexHashedNodesCoordinator(args) - ncr := nodesCoordinator1.NodesCoordinatorToRegistry() + ncr := nodesCoordinator1.NodesCoordinatorToRegistry(args.Epoch) args = createArguments() nodesCoordinator2, _ := NewIndexHashedNodesCoordinator(args) @@ -147,17 +241,17 @@ func TestIndexHashedNodesCooridinator_nodesCoordinatorToRegistryLimitNumEpochsIn } } - ncr := nodesCoordinator.NodesCoordinatorToRegistry() + ncr := nodesCoordinator.NodesCoordinatorToRegistry(args.Epoch) nc := nodesCoordinator.nodesConfig - require.Equal(t, nodesCoordinator.currentEpoch, ncr.CurrentEpoch) - require.Equal(t, nodesCoordinatorStoredEpochs, len(ncr.EpochsConfig)) + require.Equal(t, nodesCoordinator.currentEpoch, ncr.GetCurrentEpoch()) + require.Equal(t, nodesCoordinatorStoredEpochs, len(ncr.GetEpochsConfig())) - for epochStr := range ncr.EpochsConfig { + for epochStr := range ncr.GetEpochsConfig() { epoch, err := strconv.Atoi(epochStr) require.Nil(t, err) - require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].eligibleMap, ncr.EpochsConfig[epochStr].EligibleValidators)) - require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].waitingMap, ncr.EpochsConfig[epochStr].WaitingValidators)) + require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].eligibleMap, ncr.GetEpochsConfig()[epochStr].GetEligibleValidators())) + require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].waitingMap, ncr.GetEpochsConfig()[epochStr].GetWaitingValidators())) } } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go index c9e4779e73f..689fe95d341 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go @@ -76,7 +76,7 @@ func (ihnc *indexHashedNodesCoordinatorWithRater) ComputeAdditionalLeaving(allVa return extraLeavingNodesMap, nil } -//IsInterfaceNil verifies that the underlying value is nil +// IsInterfaceNil verifies that the underlying value is nil func (ihnc *indexHashedNodesCoordinatorWithRater) IsInterfaceNil() bool { return ihnc == nil } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go index dfd1bbbe2ad..a80006cceae 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go @@ -13,6 +13,9 @@ import ( "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-core-go/hashing/blake2b" "github.com/multiversx/mx-chain-core-go/hashing/sha256" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/sharding/mock" "github.com/multiversx/mx-chain-go/state" @@ -20,8 +23,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" - 
"github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestNewIndexHashedNodesCoordinatorWithRater_NilRaterShouldErr(t *testing.T) { @@ -48,14 +49,14 @@ func TestNewIndexHashedGroupSelectorWithRater_OkValsShouldWork(t *testing.T) { assert.Nil(t, err) } -//------- LoadEligibleList +// ------- LoadEligibleList func TestIndexHashedGroupSelectorWithRater_SetNilEligibleMapShouldErr(t *testing.T) { t.Parallel() waiting := createDummyNodesMap(2, 1, "waiting") nc, _ := NewIndexHashedNodesCoordinator(createArguments()) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) - assert.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waiting, nil, 0)) + assert.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waiting, nil, nil, 0, false)) } func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { @@ -79,25 +80,26 @@ func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 2, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("test"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 2, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("test"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, err := NewIndexHashedNodesCoordinator(arguments) assert.Nil(t, err) @@ -112,7 +114,7 @@ func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { assert.Equal(t, eligibleMap[0], readEligible) } -//------- functionality tests +// ------- functionality tests func TestIndexHashedGroupSelectorWithRater_ComputeValidatorsGroup1ValidatorShouldNotCallGetRating(t *testing.T) { t.Parallel() @@ -148,7 +150,7 @@ func BenchmarkIndexHashedGroupSelectorWithRater_ComputeValidatorsGroup63of400(b consensusGroupSize := 63 list := make([]Validator, 0) - //generate 400 validators + // generate 400 validators for i := 0; i < 400; i++ { list = append(list, newValidatorMock([]byte("pk"+strconv.Itoa(i)), 1, defaultSelectionChances)) } @@ -218,7 +220,7 @@ func Test_ComputeValidatorsGroup63of400(t *testing.T) { shardSize := uint32(400) list := make([]Validator, 0) - 
//generate 400 validators + // generate 400 validators for i := uint32(0); i < shardSize; i++ { list = append(list, newValidatorMock([]byte(fmt.Sprintf("pk%v", i)), 1, defaultSelectionChances)) } @@ -328,25 +330,26 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -383,25 +386,26 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: 
&nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -452,25 +456,26 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldWork(t eligibleMap[1] = listShard1 arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -537,26 +542,27 @@ func TestIndexHashedGroupSelectorWithRater_GetAllEligibleValidatorsPublicKeys(t eligibleMap[shardOneId] = listShard1 arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + 
NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -780,9 +786,9 @@ func BenchmarkIndexHashedGroupSelectorWithRater_TestExpandList(b *testing.B) { } } - //a := []int{1, 2, 3, 4, 5, 6, 7, 8} - rand.Seed(time.Now().UnixNano()) - rand.Shuffle(len(array), func(i, j int) { array[i], array[j] = array[j], array[i] }) + // a := []int{1, 2, 3, 4, 5, 6, 7, 8} + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + rnd.Shuffle(len(array), func(i, j int) { array[i], array[j] = array[j], array[i] }) m2 := runtime.MemStats{} runtime.ReadMemStats(&m2) @@ -815,7 +821,7 @@ func BenchmarkIndexHashedWithRaterGroupSelector_ComputeValidatorsGroup21of400(b consensusGroupSize := 21 list := make([]Validator, 0) - //generate 400 validators + // generate 400 validators for i := 0; i < 400; i++ { list = append(list, newValidatorMock([]byte("pk"+strconv.Itoa(i)), 1, defaultSelectionChances)) } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go index c1c01a67680..32cc2ca8326 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go @@ -20,6 +20,9 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/hashing/sha256" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" "github.com/multiversx/mx-chain-go/epochStart" @@ -31,10 +34,10 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) +const stakingV4Epoch = 444 + func createDummyNodesList(nbNodes uint32, suffix string) []Validator { list := make([]Validator, 0) hasher := sha256.NewSha256() @@ -82,6 +85,14 @@ func isStringSubgroup(a []string, b []string) bool { return found } +func createNodesCoordinatorRegistryFactory() NodesCoordinatorRegistryFactory { + ncf, _ := NewNodesCoordinatorRegistryFactory( + &marshal.GogoProtoMarshalizer{}, + stakingV4Epoch, + ) + return ncf +} + func createArguments() ArgNodesCoordinator { nbShards := uint32(1) eligibleMap := createDummyNodesMap(10, nbShards, "eligible") @@ -92,7 +103,6 @@ func createArguments() ArgNodesCoordinator { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, EnableEpochsHandler: 
&mock.EnableEpochsHandlerMock{}, } nodeShuffler, _ := NewHashValidatorsShuffler(shufflerArgs) @@ -120,8 +130,9 @@ func createArguments() ArgNodesCoordinator { EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ IsRefactorPeersMiniBlocksFlagEnabledField: true, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } return arguments } @@ -135,7 +146,7 @@ func validatorsPubKeys(validators []Validator) []string { return pKeys } -//------- NewIndexHashedNodesCoordinator +// ------- NewIndexHashedNodesCoordinator func TestNewIndexHashedNodesCoordinator_NilHasherShouldErr(t *testing.T) { t.Parallel() @@ -237,7 +248,7 @@ func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { require.Nil(t, err) } -//------- LoadEligibleList +// ------- LoadEligibleList func TestIndexHashedNodesCoordinator_SetNilEligibleMapShouldErr(t *testing.T) { t.Parallel() @@ -246,7 +257,7 @@ func TestIndexHashedNodesCoordinator_SetNilEligibleMapShouldErr(t *testing.T) { arguments := createArguments() ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waitingMap, nil, 0)) + require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waitingMap, nil, nil, 0, false)) } func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) { @@ -256,7 +267,7 @@ func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) { arguments := createArguments() ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(eligibleMap, nil, nil, 0)) + require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(eligibleMap, nil, nil, nil, 0, false)) } func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { @@ -281,24 +292,25 @@ func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 2, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 2, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: 
&vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -308,7 +320,7 @@ func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { require.Equal(t, eligibleMap[0], readEligible) } -//------- ComputeValidatorsGroup +// ------- ComputeValidatorsGroup func TestIndexHashedNodesCoordinator_NewCoordinatorGroup0SizeShouldErr(t *testing.T) { t.Parallel() @@ -342,24 +354,25 @@ func TestIndexHashedNodesCoordinator_NewCoordinatorTooFewNodesShouldErr(t *testi bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 10, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 10, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -389,7 +402,7 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroupInvalidShardIdShouldE require.Nil(t, list2) } -//------- functionality tests +// ------- functionality tests func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup1ValidatorShouldReturnSame(t *testing.T) { t.Parallel() @@ -417,24 +430,25 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup1ValidatorShouldRetur bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: nodesMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + 
ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: nodesMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) list2, err := ihnc.ComputeConsensusGroup([]byte("randomness"), 0, 0, 0) @@ -478,24 +492,25 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10locksNoM } arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -544,7 +559,7 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10BlocksMe mut := sync.Mutex{} - //consensusGroup := list[0:21] + // consensusGroup := list[0:21] cacheMap := make(map[string]interface{}) lruCache := &mock.NodesCoordinatorCacheMock{ PutCalled: func(key []byte, value interface{}, sizeInBytes int) (evicted bool) { @@ -567,24 +582,25 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10BlocksMe } arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: lruCache, - 
ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -970,24 +986,25 @@ func TestIndexHashedNodesCoordinator_GetValidatorWithPublicKeyShouldWork(t *test bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1054,25 +1071,26 @@ func TestIndexHashedGroupSelector_GetAllEligibleValidatorsPublicKeys(t *testing. 
bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1134,25 +1152,26 @@ func TestIndexHashedGroupSelector_GetAllWaitingValidatorsPublicKeys(t *testing.T eligibleMap[shardZeroId] = []Validator{&validator{}} arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: 
createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1257,7 +1276,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldTriggerWrongConfigur }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2, false) require.NoError(t, err) value := <-chanStopNode @@ -1283,7 +1302,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldNotTriggerWrongConfi }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2, false) require.NoError(t, err) require.Empty(t, chanStopNode) @@ -1315,7 +1334,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldSetNodeTypeValidator }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2, false) require.NoError(t, err) require.True(t, setTypeWasCalled) require.Equal(t, core.NodeTypeValidator, nodeTypeResult) @@ -1347,7 +1366,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldSetNodeTypeObserver( }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2, false) require.NoError(t, err) require.True(t, setTypeWasCalled) require.Equal(t, core.NodeTypeObserver, nodeTypeResult) @@ -1389,6 +1408,36 @@ func TestIndexHashedNodesCoordinator_EpochStartInEligible(t *testing.T) { require.True(t, isValidator) } +func TestIndexHashedNodesCoordinator_computeShardForSelfPublicKeyWithStakingV4(t *testing.T) { + t.Parallel() + + arguments := createArguments() + pk := []byte("pk") + arguments.SelfPublicKey = pk + nc, _ := NewIndexHashedNodesCoordinator(arguments) + epoch := uint32(2) + + metaShard := core.MetachainShardId + nc.nodesConfig = map[uint32]*epochNodesConfig{ + epoch: { + shardID: metaShard, + shuffledOutMap: map[uint32][]Validator{ + metaShard: {newValidatorMock(pk, 1, 1)}, + }, + }, + } + + computedShardId, isValidator := nc.computeShardForSelfPublicKey(nc.nodesConfig[epoch]) + require.Equal(t, nc.shardIDAsObserver, computedShardId) + require.False(t, isValidator) + + nc.flagStakingV4Step2.SetValue(true) + + computedShardId, isValidator = nc.computeShardForSelfPublicKey(nc.nodesConfig[epoch]) + require.Equal(t, metaShard, computedShardId) + require.True(t, isValidator) +} + func TestIndexHashedNodesCoordinator_EpochStartInWaiting(t *testing.T) { t.Parallel() @@ -1513,8 +1562,9 @@ func TestIndexHashedNodesCoordinator_EpochStart_EligibleSortedAscendingByIndex(t EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ IsRefactorPeersMiniBlocksFlagEnabledField: true, }, - ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -2021,38 +2071,6 @@ func TestIndexHashedNodesCoordinator_ShuffleOutNilConfig(t 
*testing.T) { require.Equal(t, expectedShardForNotFound, newShard) } -func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNilPreviousNodesConfig(t *testing.T) { - t.Parallel() - - arguments := createArguments() - pk := []byte("pk") - arguments.SelfPublicKey = pk - ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - - ihnc.flagWaitingListFix.Reset() - validatorInfos := make([]*state.ShardValidatorInfo, 0) - newNodesConfig, err := ihnc.computeNodesConfigFromList(nil, validatorInfos) - - assert.Nil(t, newNodesConfig) - assert.False(t, errors.Is(err, ErrNilPreviousEpochConfig)) - - newNodesConfig, err = ihnc.computeNodesConfigFromList(nil, nil) - - assert.Nil(t, newNodesConfig) - assert.False(t, errors.Is(err, ErrNilPreviousEpochConfig)) - - _ = ihnc.flagWaitingListFix.SetReturningPrevious() - newNodesConfig, err = ihnc.computeNodesConfigFromList(nil, validatorInfos) - - assert.Nil(t, newNodesConfig) - assert.True(t, errors.Is(err, ErrNilPreviousEpochConfig)) - - newNodesConfig, err = ihnc.computeNodesConfigFromList(nil, nil) - - assert.Nil(t, newNodesConfig) - assert.True(t, errors.Is(err, ErrNilPreviousEpochConfig)) -} - func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNoValidators(t *testing.T) { t.Parallel() @@ -2062,12 +2080,12 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNoValidators(t *t ihnc, _ := NewIndexHashedNodesCoordinator(arguments) validatorInfos := make([]*state.ShardValidatorInfo, 0) - newNodesConfig, err := ihnc.computeNodesConfigFromList(&epochNodesConfig{}, validatorInfos) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, newNodesConfig) assert.True(t, errors.Is(err, ErrMapSizeZero)) - newNodesConfig, err = ihnc.computeNodesConfigFromList(&epochNodesConfig{}, nil) + newNodesConfig, err = ihnc.computeNodesConfigFromList(nil) assert.Nil(t, newNodesConfig) assert.True(t, errors.Is(err, ErrMapSizeZero)) @@ -2099,13 +2117,62 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNilPk(t *testing. 
}, } - newNodesConfig, err := ihnc.computeNodesConfigFromList(&epochNodesConfig{}, validatorInfos) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, newNodesConfig) assert.NotNil(t, err) assert.Equal(t, ErrNilPubKey, err) } +func TestIndexHashedNodesCoordinator_computeNodesConfigFromListWithStakingV4(t *testing.T) { + t.Parallel() + arguments := createArguments() + nc, _ := NewIndexHashedNodesCoordinator(arguments) + + shard0Eligible := &state.ShardValidatorInfo{ + PublicKey: []byte("pk0"), + List: string(common.EligibleList), + Index: 1, + TempRating: 2, + ShardId: 0, + } + shard0Auction := &state.ShardValidatorInfo{ + PublicKey: []byte("pk1"), + List: string(common.SelectedFromAuctionList), + Index: 3, + TempRating: 2, + ShardId: 0, + } + shard1Auction := &state.ShardValidatorInfo{ + PublicKey: []byte("pk2"), + List: string(common.SelectedFromAuctionList), + Index: 2, + TempRating: 2, + ShardId: 1, + } + validatorInfos := []*state.ShardValidatorInfo{shard0Eligible, shard0Auction, shard1Auction} + + newNodesConfig, err := nc.computeNodesConfigFromList(validatorInfos) + require.Equal(t, ErrReceivedAuctionValidatorsBeforeStakingV4, err) + require.Nil(t, newNodesConfig) + + nc.updateEpochFlags(stakingV4Epoch) + + newNodesConfig, err = nc.computeNodesConfigFromList(validatorInfos) + require.Nil(t, err) + v1, _ := NewValidator([]byte("pk2"), 1, 2) + v2, _ := NewValidator([]byte("pk1"), 1, 3) + require.Equal(t, []Validator{v1, v2}, newNodesConfig.auctionList) + + validatorInfos = append(validatorInfos, &state.ShardValidatorInfo{ + PublicKey: []byte("pk3"), + List: string(common.NewList), + }) + newNodesConfig, err = nc.computeNodesConfigFromList(validatorInfos) + require.Equal(t, epochStart.ErrReceivedNewListNodeInStakingV4, err) + require.Nil(t, newNodesConfig) +} + func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix(t *testing.T) { t.Parallel() @@ -2113,7 +2180,7 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix pk := []byte("pk") arguments.SelfPublicKey = pk ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - _ = ihnc.flagWaitingListFix.SetReturningPrevious() + _ = ihnc.flagStakingV4Started.SetReturningPrevious() shard0Eligible0 := &state.ShardValidatorInfo{ PublicKey: []byte("pk0"), @@ -2154,15 +2221,18 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix ShardId: 0, } shard0Leaving0 := &state.ShardValidatorInfo{ - PublicKey: []byte("pk6"), - List: string(common.LeavingList), - ShardId: 0, + PublicKey: []byte("pk6"), + List: string(common.LeavingList), + PreviousList: string(common.EligibleList), + ShardId: 0, } shardMetaLeaving1 := &state.ShardValidatorInfo{ - PublicKey: []byte("pk7"), - List: string(common.LeavingList), - Index: 1, - ShardId: core.MetachainShardId, + PublicKey: []byte("pk7"), + List: string(common.LeavingList), + PreviousList: string(common.WaitingList), + Index: 1, + PreviousIndex: 1, + ShardId: core.MetachainShardId, } validatorInfos := @@ -2177,29 +2247,7 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix shardMetaLeaving1, } - previousConfig := &epochNodesConfig{ - eligibleMap: map[uint32][]Validator{ - 0: { - newValidatorMock(shard0Eligible0.PublicKey, 0, 0), - newValidatorMock(shard0Eligible1.PublicKey, 0, 0), - newValidatorMock(shard0Leaving0.PublicKey, 0, 0), - }, - core.MetachainShardId: { - newValidatorMock(shardmetaEligible0.PublicKey, 0, 0), - }, - }, - waitingMap: map[uint32][]Validator{ - 0: { 
- newValidatorMock(shard0Waiting0.PublicKey, 0, 0), - }, - core.MetachainShardId: { - newValidatorMock(shardmetaWaiting0.PublicKey, 0, 0), - newValidatorMock(shardMetaLeaving1.PublicKey, 0, 0), - }, - }, - } - - newNodesConfig, err := ihnc.computeNodesConfigFromList(previousConfig, validatorInfos) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, err) assert.Equal(t, uint32(1), newNodesConfig.nbShards) @@ -2293,10 +2341,6 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsNoFix(t ShardId: core.MetachainShardId, } - previousConfig := &epochNodesConfig{ - eligibleMap: map[uint32][]Validator{}, - } - validatorInfos := []*state.ShardValidatorInfo{ shard0Eligible0, @@ -2309,8 +2353,8 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsNoFix(t shardMetaLeaving1, } - ihnc.flagWaitingListFix.Reset() - newNodesConfig, err := ihnc.computeNodesConfigFromList(previousConfig, validatorInfos) + ihnc.flagStakingV4Started.Reset() + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, err) assert.Equal(t, uint32(1), newNodesConfig.nbShards) @@ -2509,8 +2553,9 @@ func TestIndexHashedGroupSelector_GetWaitingEpochsLeftForPublicKey(t *testing.T) EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ CurrentEpoch: 1, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -2585,6 +2630,7 @@ func TestIndexHashedGroupSelector_GetWaitingEpochsLeftForPublicKey(t *testing.T) return 0 }, }, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -2669,6 +2715,7 @@ func TestIndexHashedGroupSelector_GetWaitingEpochsLeftForPublicKey(t *testing.T) return 2 }, }, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) diff --git a/sharding/nodesCoordinator/interface.go b/sharding/nodesCoordinator/interface.go index 0c16a505364..5e2d5564a5c 100644 --- a/sharding/nodesCoordinator/interface.go +++ b/sharding/nodesCoordinator/interface.go @@ -3,6 +3,8 @@ package nodesCoordinator import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/state" ) @@ -46,6 +48,8 @@ type PublicKeysSelector interface { GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetAllWaitingValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetAllLeavingValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) + GetAllShuffledOutValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) + GetShuffledOutToAuctionValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetConsensusValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetOwnPublicKey() []byte } @@ -66,9 +70,9 @@ type NodesCoordinatorHelper interface { // ChanceComputer provides chance computation capabilities based on a rating type ChanceComputer interface { - //GetChance 
returns the chances for the rating + // GetChance returns the chances for the rating GetChance(uint32) uint32 - //IsInterfaceNil verifies if the interface is nil + // IsInterfaceNil verifies if the interface is nil IsInterfaceNil() bool } @@ -138,3 +142,39 @@ type GenesisNodesSetupHandler interface { MinMetaHysteresisNodes() uint32 IsInterfaceNil() bool } + +// EpochValidatorsHandler defines what one epoch configuration for a nodes coordinator should hold +type EpochValidatorsHandler interface { + GetEligibleValidators() map[string][]*SerializableValidator + GetWaitingValidators() map[string][]*SerializableValidator + GetLeavingValidators() map[string][]*SerializableValidator +} + +// EpochValidatorsHandlerWithAuction defines what one epoch configuration for a nodes coordinator should hold + shuffled out validators +type EpochValidatorsHandlerWithAuction interface { + EpochValidatorsHandler + GetShuffledOutValidators() map[string][]*SerializableValidator + GetLowWaitingList() bool +} + +// NodesCoordinatorRegistryHandler defines what is used to initialize nodes coordinator +type NodesCoordinatorRegistryHandler interface { + GetEpochsConfig() map[string]EpochValidatorsHandler + GetCurrentEpoch() uint32 + SetCurrentEpoch(epoch uint32) +} + +// NodesCoordinatorRegistryFactory handles NodesCoordinatorRegistryHandler marshall/unmarshall +type NodesCoordinatorRegistryFactory interface { + CreateNodesCoordinatorRegistry(buff []byte) (NodesCoordinatorRegistryHandler, error) + GetRegistryData(registry NodesCoordinatorRegistryHandler, epoch uint32) ([]byte, error) + IsInterfaceNil() bool +} + +// EpochNotifier can notify upon an epoch change and provide the current epoch +type EpochNotifier interface { + RegisterNotifyHandler(handler vmcommon.EpochSubscriberHandler) + CurrentEpoch() uint32 + CheckEpoch(header data.HeaderHandler) + IsInterfaceNil() bool +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistry.go b/sharding/nodesCoordinator/nodesCoordinatorRegistry.go new file mode 100644 index 00000000000..fbf84919d7a --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistry.go @@ -0,0 +1,49 @@ +package nodesCoordinator + +// EpochValidators holds one epoch configuration for a nodes coordinator +type EpochValidators struct { + EligibleValidators map[string][]*SerializableValidator `json:"eligibleValidators"` + WaitingValidators map[string][]*SerializableValidator `json:"waitingValidators"` + LeavingValidators map[string][]*SerializableValidator `json:"leavingValidators"` +} + +// GetEligibleValidators returns all eligible validators from all shards +func (ev *EpochValidators) GetEligibleValidators() map[string][]*SerializableValidator { + return ev.EligibleValidators +} + +// GetWaitingValidators returns all waiting validators from all shards +func (ev *EpochValidators) GetWaitingValidators() map[string][]*SerializableValidator { + return ev.WaitingValidators +} + +// GetLeavingValidators returns all leaving validators from all shards +func (ev *EpochValidators) GetLeavingValidators() map[string][]*SerializableValidator { + return ev.LeavingValidators +} + +// NodesCoordinatorRegistry holds the data that can be used to initialize a nodes coordinator +type NodesCoordinatorRegistry struct { + EpochsConfig map[string]*EpochValidators `json:"epochConfigs"` + CurrentEpoch uint32 `json:"currentEpoch"` +} + +// GetCurrentEpoch returns the current epoch +func (ncr *NodesCoordinatorRegistry) GetCurrentEpoch() uint32 { + return ncr.CurrentEpoch +} + +// GetEpochsConfig returns 
epoch-validators configuration
+func (ncr *NodesCoordinatorRegistry) GetEpochsConfig() map[string]EpochValidatorsHandler {
+	ret := make(map[string]EpochValidatorsHandler)
+	for epoch, config := range ncr.EpochsConfig {
+		ret[epoch] = config
+	}
+
+	return ret
+}
+
+// SetCurrentEpoch sets internally the current epoch
+func (ncr *NodesCoordinatorRegistry) SetCurrentEpoch(epoch uint32) {
+	ncr.CurrentEpoch = epoch
+}
diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryFactory.go b/sharding/nodesCoordinator/nodesCoordinatorRegistryFactory.go
new file mode 100644
index 00000000000..894d3f7a1f0
--- /dev/null
+++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryFactory.go
@@ -0,0 +1,80 @@
+package nodesCoordinator
+
+import (
+	"encoding/json"
+
+	"github.com/multiversx/mx-chain-core-go/core/check"
+	"github.com/multiversx/mx-chain-core-go/marshal"
+)
+
+type nodesCoordinatorRegistryFactory struct {
+	marshaller                marshal.Marshalizer
+	stakingV4Step2EnableEpoch uint32
+}
+
+// NewNodesCoordinatorRegistryFactory creates a nodes coordinator registry factory which will create a
+// NodesCoordinatorRegistryHandler from a buffer depending on the epoch
+func NewNodesCoordinatorRegistryFactory(
+	marshaller marshal.Marshalizer,
+	stakingV4Step2EnableEpoch uint32,
+) (*nodesCoordinatorRegistryFactory, error) {
+	if check.IfNil(marshaller) {
+		return nil, ErrNilMarshalizer
+	}
+
+	return &nodesCoordinatorRegistryFactory{
+		marshaller:                marshaller,
+		stakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch,
+	}, nil
+}
+
+// CreateNodesCoordinatorRegistry creates a NodesCoordinatorRegistryHandler depending on the buffer. The old version uses
+// NodesCoordinatorRegistry with a JSON marshaller, while the new version (from staking v4) uses NodesCoordinatorRegistryWithAuction
+// with a proto marshaller
+func (ncf *nodesCoordinatorRegistryFactory) CreateNodesCoordinatorRegistry(buff []byte) (NodesCoordinatorRegistryHandler, error) {
+	registry, err := ncf.createRegistryWithAuction(buff)
+	if err == nil {
+		log.Debug("nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry created registry with auction",
+			"epoch", registry.CurrentEpoch)
+		return registry, nil
+	}
+	log.Debug("nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry creating old registry")
+	return createOldRegistry(buff)
+}
+
+func (ncf *nodesCoordinatorRegistryFactory) createRegistryWithAuction(buff []byte) (*NodesCoordinatorRegistryWithAuction, error) {
+	registry := &NodesCoordinatorRegistryWithAuction{}
+	err := ncf.marshaller.Unmarshal(registry, buff)
+	if err != nil {
+		return nil, err
+	}
+
+	log.Debug("nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry created registry with auction",
+		"epoch", registry.CurrentEpoch)
+	return registry, nil
+}
+
+func createOldRegistry(buff []byte) (*NodesCoordinatorRegistry, error) {
+	registry := &NodesCoordinatorRegistry{}
+	err := json.Unmarshal(buff, registry)
+	if err != nil {
+		return nil, err
+	}
+
+	return registry, nil
+}
+
+// GetRegistryData returns the registry data as buffer. 
Old version uses json marshaller, while new version uses proto marshaller +func (ncf *nodesCoordinatorRegistryFactory) GetRegistryData(registry NodesCoordinatorRegistryHandler, epoch uint32) ([]byte, error) { + if epoch >= ncf.stakingV4Step2EnableEpoch { + log.Debug("nodesCoordinatorRegistryFactory.GetRegistryData called with auction after staking v4", "epoch", epoch) + return ncf.marshaller.Marshal(registry) + } + log.Debug("nodesCoordinatorRegistryFactory.GetRegistryData called with old json before staking v4", "epoch", epoch) + return json.Marshal(registry) +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ncf *nodesCoordinatorRegistryFactory) IsInterfaceNil() bool { + return ncf == nil +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.go b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.go new file mode 100644 index 00000000000..d9bea843a16 --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.go @@ -0,0 +1,47 @@ +//go:generate protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/multiversx/protobuf/protobuf --gogoslick_out=. nodesCoordinatorRegistryWithAuction.proto +package nodesCoordinator + +func protoValidatorsMapToSliceMap(validators map[string]Validators) map[string][]*SerializableValidator { + ret := make(map[string][]*SerializableValidator) + + for shardID, val := range validators { + ret[shardID] = val.GetData() + } + + return ret +} + +// GetEligibleValidators returns all eligible validators from all shards +func (m *EpochValidatorsWithAuction) GetEligibleValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetEligible()) +} + +// GetWaitingValidators returns all waiting validators from all shards +func (m *EpochValidatorsWithAuction) GetWaitingValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetWaiting()) +} + +// GetLeavingValidators returns all leaving validators from all shards +func (m *EpochValidatorsWithAuction) GetLeavingValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetLeaving()) +} + +// GetShuffledOutValidators returns all shuffled out validators from all shards +func (m *EpochValidatorsWithAuction) GetShuffledOutValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetShuffledOut()) +} + +// GetEpochsConfig returns epoch-validators configuration +func (m *NodesCoordinatorRegistryWithAuction) GetEpochsConfig() map[string]EpochValidatorsHandler { + ret := make(map[string]EpochValidatorsHandler) + for epoch, config := range m.GetEpochsConfigWithAuction() { + ret[epoch] = config + } + + return ret +} + +// SetCurrentEpoch sets internally the current epoch +func (m *NodesCoordinatorRegistryWithAuction) SetCurrentEpoch(epoch uint32) { + m.CurrentEpoch = epoch +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.pb.go b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.pb.go new file mode 100644 index 00000000000..3fa0434075a --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.pb.go @@ -0,0 +1,2175 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: nodesCoordinatorRegistryWithAuction.proto + +package nodesCoordinator + +import ( + bytes "bytes" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type SerializableValidator struct { + PubKey []byte `protobuf:"bytes,1,opt,name=PubKey,proto3" json:"pubKey"` + Chances uint32 `protobuf:"varint,2,opt,name=Chances,proto3" json:"chances"` + Index uint32 `protobuf:"varint,3,opt,name=Index,proto3" json:"index"` +} + +func (m *SerializableValidator) Reset() { *m = SerializableValidator{} } +func (*SerializableValidator) ProtoMessage() {} +func (*SerializableValidator) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{0} +} +func (m *SerializableValidator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SerializableValidator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SerializableValidator) XXX_Merge(src proto.Message) { + xxx_messageInfo_SerializableValidator.Merge(m, src) +} +func (m *SerializableValidator) XXX_Size() int { + return m.Size() +} +func (m *SerializableValidator) XXX_DiscardUnknown() { + xxx_messageInfo_SerializableValidator.DiscardUnknown(m) +} + +var xxx_messageInfo_SerializableValidator proto.InternalMessageInfo + +func (m *SerializableValidator) GetPubKey() []byte { + if m != nil { + return m.PubKey + } + return nil +} + +func (m *SerializableValidator) GetChances() uint32 { + if m != nil { + return m.Chances + } + return 0 +} + +func (m *SerializableValidator) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +type Validators struct { + Data []*SerializableValidator `protobuf:"bytes,1,rep,name=Data,proto3" json:"Data,omitempty"` +} + +func (m *Validators) Reset() { *m = Validators{} } +func (*Validators) ProtoMessage() {} +func (*Validators) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{1} +} +func (m *Validators) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Validators) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Validators) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validators.Merge(m, src) +} +func (m *Validators) XXX_Size() int { + return m.Size() +} +func (m *Validators) XXX_DiscardUnknown() { + xxx_messageInfo_Validators.DiscardUnknown(m) +} + +var xxx_messageInfo_Validators proto.InternalMessageInfo + +func (m *Validators) GetData() []*SerializableValidator { + if m != nil { + return m.Data + } + return nil +} + +type EpochValidatorsWithAuction struct { + Eligible map[string]Validators `protobuf:"bytes,1,rep,name=Eligible,proto3" 
json:"Eligible" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Waiting map[string]Validators `protobuf:"bytes,2,rep,name=Waiting,proto3" json:"Waiting" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Leaving map[string]Validators `protobuf:"bytes,3,rep,name=Leaving,proto3" json:"Leaving" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ShuffledOut map[string]Validators `protobuf:"bytes,4,rep,name=ShuffledOut,proto3" json:"ShuffledOut" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + LowWaitingList bool `protobuf:"varint,5,opt,name=LowWaitingList,proto3" json:"LowWaitingList,omitempty"` +} + +func (m *EpochValidatorsWithAuction) Reset() { *m = EpochValidatorsWithAuction{} } +func (*EpochValidatorsWithAuction) ProtoMessage() {} +func (*EpochValidatorsWithAuction) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{2} +} +func (m *EpochValidatorsWithAuction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EpochValidatorsWithAuction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EpochValidatorsWithAuction) XXX_Merge(src proto.Message) { + xxx_messageInfo_EpochValidatorsWithAuction.Merge(m, src) +} +func (m *EpochValidatorsWithAuction) XXX_Size() int { + return m.Size() +} +func (m *EpochValidatorsWithAuction) XXX_DiscardUnknown() { + xxx_messageInfo_EpochValidatorsWithAuction.DiscardUnknown(m) +} + +var xxx_messageInfo_EpochValidatorsWithAuction proto.InternalMessageInfo + +func (m *EpochValidatorsWithAuction) GetEligible() map[string]Validators { + if m != nil { + return m.Eligible + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetWaiting() map[string]Validators { + if m != nil { + return m.Waiting + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetLeaving() map[string]Validators { + if m != nil { + return m.Leaving + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetShuffledOut() map[string]Validators { + if m != nil { + return m.ShuffledOut + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetLowWaitingList() bool { + if m != nil { + return m.LowWaitingList + } + return false +} + +type NodesCoordinatorRegistryWithAuction struct { + CurrentEpoch uint32 `protobuf:"varint,1,opt,name=CurrentEpoch,proto3" json:"CurrentEpoch,omitempty"` + EpochsConfigWithAuction map[string]*EpochValidatorsWithAuction `protobuf:"bytes,2,rep,name=EpochsConfigWithAuction,proto3" json:"EpochsConfigWithAuction,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *NodesCoordinatorRegistryWithAuction) Reset() { *m = NodesCoordinatorRegistryWithAuction{} } +func (*NodesCoordinatorRegistryWithAuction) ProtoMessage() {} +func (*NodesCoordinatorRegistryWithAuction) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{3} +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_NodesCoordinatorRegistryWithAuction.Merge(m, src) +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Size() int { + return m.Size() +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_DiscardUnknown() { + xxx_messageInfo_NodesCoordinatorRegistryWithAuction.DiscardUnknown(m) +} + +var xxx_messageInfo_NodesCoordinatorRegistryWithAuction proto.InternalMessageInfo + +func (m *NodesCoordinatorRegistryWithAuction) GetCurrentEpoch() uint32 { + if m != nil { + return m.CurrentEpoch + } + return 0 +} + +func (m *NodesCoordinatorRegistryWithAuction) GetEpochsConfigWithAuction() map[string]*EpochValidatorsWithAuction { + if m != nil { + return m.EpochsConfigWithAuction + } + return nil +} + +func init() { + proto.RegisterType((*SerializableValidator)(nil), "proto.SerializableValidator") + proto.RegisterType((*Validators)(nil), "proto.Validators") + proto.RegisterType((*EpochValidatorsWithAuction)(nil), "proto.EpochValidatorsWithAuction") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.EligibleEntry") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.LeavingEntry") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.ShuffledOutEntry") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.WaitingEntry") + proto.RegisterType((*NodesCoordinatorRegistryWithAuction)(nil), "proto.NodesCoordinatorRegistryWithAuction") + proto.RegisterMapType((map[string]*EpochValidatorsWithAuction)(nil), "proto.NodesCoordinatorRegistryWithAuction.EpochsConfigWithAuctionEntry") +} + +func init() { + proto.RegisterFile("nodesCoordinatorRegistryWithAuction.proto", fileDescriptor_f04461c784f438d5) +} + +var fileDescriptor_f04461c784f438d5 = []byte{ + // 580 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x8e, 0xd2, 0x40, + 0x18, 0xc7, 0x3b, 0x40, 0x61, 0x77, 0x00, 0x83, 0x93, 0x18, 0x1b, 0xb2, 0x19, 0xb0, 0x46, 0xc5, + 0x83, 0xc5, 0xe0, 0x41, 0xe3, 0xc1, 0x44, 0x90, 0x18, 0x15, 0xd1, 0xed, 0x26, 0x6e, 0xb2, 0xb7, + 0x16, 0x86, 0x32, 0xb1, 0xdb, 0x21, 0xed, 0x74, 0x15, 0x4f, 0x1a, 0x5f, 0xc0, 0xc7, 0xf0, 0x21, + 0x7c, 0x80, 0x3d, 0x72, 0xf0, 0xc0, 0x89, 0x48, 0xb9, 0x18, 0x4e, 0xfb, 0x08, 0x86, 0xa1, 0xec, + 0x16, 0xb2, 0xc8, 0x26, 0xbb, 0x27, 0x66, 0xfe, 0x33, 0xff, 0xdf, 0xf7, 0xf1, 0xef, 0x97, 0x81, + 0xf7, 0x1d, 0xd6, 0x26, 0x5e, 0x8d, 0x31, 0xb7, 0x4d, 0x1d, 0x83, 0x33, 0x57, 0x27, 0x16, 0xf5, + 0xb8, 0xdb, 0xdf, 0xa7, 0xbc, 0xfb, 0xdc, 0x6f, 0x71, 0xca, 0x1c, 0xad, 0xe7, 0x32, 0xce, 0x90, + 0x2c, 0x7e, 0xf2, 0x0f, 0x2c, 0xca, 0xbb, 0xbe, 0xa9, 0xb5, 0xd8, 0x61, 0xd9, 0x62, 0x16, 0x2b, + 0x0b, 0xd9, 0xf4, 0x3b, 0x62, 0x27, 0x36, 0x62, 0x35, 0x77, 0xa9, 0xdf, 0x01, 0xbc, 0xb1, 0x47, + 0x5c, 0x6a, 0xd8, 0xf4, 0x8b, 0x61, 0xda, 0xe4, 0x83, 0x61, 0xd3, 0xf6, 0xac, 0x10, 0x52, 0x61, + 0xf2, 0xbd, 0x6f, 0xbe, 0x21, 0x7d, 0x05, 0x14, 0x41, 0x29, 0x53, 0x85, 0xd3, 0x51, 0x21, 0xd9, + 0x13, 0x8a, 0x1e, 0x9e, 0xa0, 0x3b, 0x30, 0x55, 0xeb, 0x1a, 0x4e, 0x8b, 0x78, 0x4a, 0xac, 0x08, + 0x4a, 0xd9, 0x6a, 0x7a, 0x3a, 0x2a, 0xa4, 0x5a, 0x73, 0x49, 0x5f, 0x9c, 0xa1, 0x02, 0x94, 0x5f, + 0x39, 0x6d, 0xf2, 0x59, 0x89, 0x8b, 0x4b, 0xdb, 0xd3, 0x51, 0x41, 0xa6, 0x33, 0x41, 0x9f, 0xeb, + 0xea, 0x33, 0x08, 0x4f, 0x0b, 0x7b, 0xe8, 0x21, 0x4c, 0xbc, 0x30, 0xb8, 0xa1, 0x80, 0x62, 0xbc, + 0x94, 0xae, 0xec, 0xcc, 0x3b, 0xd5, 0xce, 0xed, 0x52, 0x17, 0x37, 0xd5, 0xdf, 0x32, 0xcc, 0xd7, + 0x7b, 0xac, 0xd5, 0x3d, 0xa3, 
0x44, 0x02, 0x42, 0xbb, 0x70, 0xab, 0x6e, 0x53, 0x8b, 0x9a, 0x36, + 0x09, 0xa1, 0xe5, 0x10, 0xba, 0xde, 0xa4, 0x2d, 0x1c, 0x75, 0x87, 0xbb, 0xfd, 0x6a, 0xe2, 0x78, + 0x54, 0x90, 0xf4, 0x53, 0x0c, 0x6a, 0xc2, 0xd4, 0xbe, 0x41, 0x39, 0x75, 0x2c, 0x25, 0x26, 0x88, + 0xda, 0x66, 0x62, 0x68, 0x88, 0x02, 0x17, 0x90, 0x19, 0xaf, 0x41, 0x8c, 0xa3, 0x19, 0x2f, 0x7e, + 0x51, 0x5e, 0x68, 0x58, 0xe2, 0x85, 0x1a, 0x3a, 0x80, 0xe9, 0xbd, 0xae, 0xdf, 0xe9, 0xd8, 0xa4, + 0xfd, 0xce, 0xe7, 0x4a, 0x42, 0x30, 0x2b, 0x9b, 0x99, 0x11, 0x53, 0x94, 0x1b, 0x85, 0xa1, 0xbb, + 0xf0, 0x5a, 0x83, 0x7d, 0x0a, 0x3b, 0x6f, 0x50, 0x8f, 0x2b, 0x72, 0x11, 0x94, 0xb6, 0xf4, 0x15, + 0x35, 0xdf, 0x84, 0xd9, 0xa5, 0x10, 0x51, 0x0e, 0xc6, 0x3f, 0x86, 0xf3, 0xb4, 0xad, 0xcf, 0x96, + 0xe8, 0x1e, 0x94, 0x8f, 0x0c, 0xdb, 0x27, 0x62, 0x7c, 0xd2, 0x95, 0xeb, 0x61, 0x83, 0x67, 0xbd, + 0xe9, 0xf3, 0xf3, 0xa7, 0xb1, 0x27, 0x20, 0xff, 0x16, 0x66, 0xa2, 0x11, 0x5e, 0x01, 0x2e, 0x9a, + 0xe0, 0x65, 0x71, 0xbb, 0x30, 0xb7, 0x1a, 0xde, 0x25, 0x91, 0xea, 0xaf, 0x18, 0xbc, 0xdd, 0xdc, + 0xfc, 0x00, 0x20, 0x15, 0x66, 0x6a, 0xbe, 0xeb, 0x12, 0x87, 0x8b, 0x2f, 0x2b, 0xea, 0x65, 0xf5, + 0x25, 0x0d, 0x7d, 0x03, 0xf0, 0xa6, 0x58, 0x79, 0x35, 0xe6, 0x74, 0xa8, 0x15, 0xf1, 0x87, 0x13, + 0xfc, 0x32, 0xec, 0xe5, 0x02, 0x15, 0xb5, 0x35, 0x24, 0xf1, 0xaf, 0xf5, 0x75, 0x75, 0xf2, 0x87, + 0x70, 0xe7, 0x7f, 0xc6, 0x73, 0xe2, 0x7a, 0xbc, 0x1c, 0xd7, 0xad, 0x8d, 0x03, 0x1c, 0x89, 0xaf, + 0xfa, 0x7a, 0x30, 0xc6, 0xd2, 0x70, 0x8c, 0xa5, 0x93, 0x31, 0x06, 0x5f, 0x03, 0x0c, 0x7e, 0x06, + 0x18, 0x1c, 0x07, 0x18, 0x0c, 0x02, 0x0c, 0x86, 0x01, 0x06, 0x7f, 0x02, 0x0c, 0xfe, 0x06, 0x58, + 0x3a, 0x09, 0x30, 0xf8, 0x31, 0xc1, 0xd2, 0x60, 0x82, 0xa5, 0xe1, 0x04, 0x4b, 0x07, 0xb9, 0xd5, + 0x67, 0xd7, 0x4c, 0x8a, 0xc2, 0x8f, 0xfe, 0x05, 0x00, 0x00, 0xff, 0xff, 0x65, 0xa3, 0xf9, 0xfb, + 0x91, 0x05, 0x00, 0x00, +} + +func (this *SerializableValidator) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SerializableValidator) + if !ok { + that2, ok := that.(SerializableValidator) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.PubKey, that1.PubKey) { + return false + } + if this.Chances != that1.Chances { + return false + } + if this.Index != that1.Index { + return false + } + return true +} +func (this *Validators) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Validators) + if !ok { + that2, ok := that.(Validators) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Data) != len(that1.Data) { + return false + } + for i := range this.Data { + if !this.Data[i].Equal(that1.Data[i]) { + return false + } + } + return true +} +func (this *EpochValidatorsWithAuction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EpochValidatorsWithAuction) + if !ok { + that2, ok := that.(EpochValidatorsWithAuction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Eligible) != len(that1.Eligible) { + return false + } + for i := range this.Eligible { + a := this.Eligible[i] + b := that1.Eligible[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.Waiting) != len(that1.Waiting) { + return false + } + for i 
:= range this.Waiting { + a := this.Waiting[i] + b := that1.Waiting[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.Leaving) != len(that1.Leaving) { + return false + } + for i := range this.Leaving { + a := this.Leaving[i] + b := that1.Leaving[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.ShuffledOut) != len(that1.ShuffledOut) { + return false + } + for i := range this.ShuffledOut { + a := this.ShuffledOut[i] + b := that1.ShuffledOut[i] + if !(&a).Equal(&b) { + return false + } + } + if this.LowWaitingList != that1.LowWaitingList { + return false + } + return true +} +func (this *NodesCoordinatorRegistryWithAuction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*NodesCoordinatorRegistryWithAuction) + if !ok { + that2, ok := that.(NodesCoordinatorRegistryWithAuction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.CurrentEpoch != that1.CurrentEpoch { + return false + } + if len(this.EpochsConfigWithAuction) != len(that1.EpochsConfigWithAuction) { + return false + } + for i := range this.EpochsConfigWithAuction { + if !this.EpochsConfigWithAuction[i].Equal(that1.EpochsConfigWithAuction[i]) { + return false + } + } + return true +} +func (this *SerializableValidator) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&nodesCoordinator.SerializableValidator{") + s = append(s, "PubKey: "+fmt.Sprintf("%#v", this.PubKey)+",\n") + s = append(s, "Chances: "+fmt.Sprintf("%#v", this.Chances)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Validators) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&nodesCoordinator.Validators{") + if this.Data != nil { + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EpochValidatorsWithAuction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&nodesCoordinator.EpochValidatorsWithAuction{") + keysForEligible := make([]string, 0, len(this.Eligible)) + for k, _ := range this.Eligible { + keysForEligible = append(keysForEligible, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEligible) + mapStringForEligible := "map[string]Validators{" + for _, k := range keysForEligible { + mapStringForEligible += fmt.Sprintf("%#v: %#v,", k, this.Eligible[k]) + } + mapStringForEligible += "}" + if this.Eligible != nil { + s = append(s, "Eligible: "+mapStringForEligible+",\n") + } + keysForWaiting := make([]string, 0, len(this.Waiting)) + for k, _ := range this.Waiting { + keysForWaiting = append(keysForWaiting, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWaiting) + mapStringForWaiting := "map[string]Validators{" + for _, k := range keysForWaiting { + mapStringForWaiting += fmt.Sprintf("%#v: %#v,", k, this.Waiting[k]) + } + mapStringForWaiting += "}" + if this.Waiting != nil { + s = append(s, "Waiting: "+mapStringForWaiting+",\n") + } + keysForLeaving := make([]string, 0, len(this.Leaving)) + for k, _ := range this.Leaving { + keysForLeaving = append(keysForLeaving, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLeaving) + mapStringForLeaving := "map[string]Validators{" + for _, k := range keysForLeaving { + mapStringForLeaving += 
fmt.Sprintf("%#v: %#v,", k, this.Leaving[k]) + } + mapStringForLeaving += "}" + if this.Leaving != nil { + s = append(s, "Leaving: "+mapStringForLeaving+",\n") + } + keysForShuffledOut := make([]string, 0, len(this.ShuffledOut)) + for k, _ := range this.ShuffledOut { + keysForShuffledOut = append(keysForShuffledOut, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForShuffledOut) + mapStringForShuffledOut := "map[string]Validators{" + for _, k := range keysForShuffledOut { + mapStringForShuffledOut += fmt.Sprintf("%#v: %#v,", k, this.ShuffledOut[k]) + } + mapStringForShuffledOut += "}" + if this.ShuffledOut != nil { + s = append(s, "ShuffledOut: "+mapStringForShuffledOut+",\n") + } + s = append(s, "LowWaitingList: "+fmt.Sprintf("%#v", this.LowWaitingList)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NodesCoordinatorRegistryWithAuction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&nodesCoordinator.NodesCoordinatorRegistryWithAuction{") + s = append(s, "CurrentEpoch: "+fmt.Sprintf("%#v", this.CurrentEpoch)+",\n") + keysForEpochsConfigWithAuction := make([]string, 0, len(this.EpochsConfigWithAuction)) + for k, _ := range this.EpochsConfigWithAuction { + keysForEpochsConfigWithAuction = append(keysForEpochsConfigWithAuction, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEpochsConfigWithAuction) + mapStringForEpochsConfigWithAuction := "map[string]*EpochValidatorsWithAuction{" + for _, k := range keysForEpochsConfigWithAuction { + mapStringForEpochsConfigWithAuction += fmt.Sprintf("%#v: %#v,", k, this.EpochsConfigWithAuction[k]) + } + mapStringForEpochsConfigWithAuction += "}" + if this.EpochsConfigWithAuction != nil { + s = append(s, "EpochsConfigWithAuction: "+mapStringForEpochsConfigWithAuction+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringNodesCoordinatorRegistryWithAuction(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SerializableValidator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SerializableValidator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SerializableValidator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index != 0 { + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x18 + } + if m.Chances != 0 { + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(m.Chances)) + i-- + dAtA[i] = 0x10 + } + if len(m.PubKey) > 0 { + i -= len(m.PubKey) + copy(dAtA[i:], m.PubKey) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(m.PubKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Validators) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validators) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validators) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + for iNdEx := len(m.Data) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Data[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EpochValidatorsWithAuction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EpochValidatorsWithAuction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EpochValidatorsWithAuction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LowWaitingList { + i-- + if m.LowWaitingList { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ShuffledOut) > 0 { + keysForShuffledOut := make([]string, 0, len(m.ShuffledOut)) + for k := range m.ShuffledOut { + keysForShuffledOut = append(keysForShuffledOut, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForShuffledOut) + for iNdEx := len(keysForShuffledOut) - 1; iNdEx >= 0; iNdEx-- { + v := m.ShuffledOut[string(keysForShuffledOut[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForShuffledOut[iNdEx]) + copy(dAtA[i:], keysForShuffledOut[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForShuffledOut[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Leaving) > 0 { + keysForLeaving := make([]string, 0, len(m.Leaving)) + for k := range m.Leaving { + keysForLeaving = append(keysForLeaving, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLeaving) + for iNdEx := len(keysForLeaving) - 1; iNdEx >= 0; iNdEx-- { + v := m.Leaving[string(keysForLeaving[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForLeaving[iNdEx]) + copy(dAtA[i:], keysForLeaving[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForLeaving[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Waiting) > 0 { + keysForWaiting := make([]string, 0, len(m.Waiting)) + for k := range m.Waiting { + keysForWaiting = append(keysForWaiting, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWaiting) + for iNdEx := len(keysForWaiting) - 1; iNdEx >= 0; iNdEx-- { + v := m.Waiting[string(keysForWaiting[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForWaiting[iNdEx]) + copy(dAtA[i:], keysForWaiting[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForWaiting[iNdEx]))) + i-- + 
dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Eligible) > 0 { + keysForEligible := make([]string, 0, len(m.Eligible)) + for k := range m.Eligible { + keysForEligible = append(keysForEligible, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEligible) + for iNdEx := len(keysForEligible) - 1; iNdEx >= 0; iNdEx-- { + v := m.Eligible[string(keysForEligible[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForEligible[iNdEx]) + copy(dAtA[i:], keysForEligible[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForEligible[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodesCoordinatorRegistryWithAuction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodesCoordinatorRegistryWithAuction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodesCoordinatorRegistryWithAuction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EpochsConfigWithAuction) > 0 { + keysForEpochsConfigWithAuction := make([]string, 0, len(m.EpochsConfigWithAuction)) + for k := range m.EpochsConfigWithAuction { + keysForEpochsConfigWithAuction = append(keysForEpochsConfigWithAuction, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEpochsConfigWithAuction) + for iNdEx := len(keysForEpochsConfigWithAuction) - 1; iNdEx >= 0; iNdEx-- { + v := m.EpochsConfigWithAuction[string(keysForEpochsConfigWithAuction[iNdEx])] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(keysForEpochsConfigWithAuction[iNdEx]) + copy(dAtA[i:], keysForEpochsConfigWithAuction[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForEpochsConfigWithAuction[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.CurrentEpoch != 0 { + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(m.CurrentEpoch)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintNodesCoordinatorRegistryWithAuction(dAtA []byte, offset int, v uint64) int { + offset -= sovNodesCoordinatorRegistryWithAuction(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SerializableValidator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PubKey) + if l > 0 { + n += 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + } + if m.Chances != 0 { + n += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(m.Chances)) + } + if m.Index != 0 { + n += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(m.Index)) + } + return n +} + +func (m *Validators) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Data) > 0 { + for _, e := range m.Data { + l = e.Size() + n += 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + } + } + return n +} + +func (m *EpochValidatorsWithAuction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Eligible) > 0 { + for k, v := range m.Eligible { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if len(m.Waiting) > 0 { + for k, v := range m.Waiting { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if len(m.Leaving) > 0 { + for k, v := range m.Leaving { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if len(m.ShuffledOut) > 0 { + for k, v := range m.ShuffledOut { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if m.LowWaitingList { + n += 2 + } + return n +} + +func (m *NodesCoordinatorRegistryWithAuction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CurrentEpoch != 0 { + n += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(m.CurrentEpoch)) + } + if len(m.EpochsConfigWithAuction) > 0 { + for k, v := range m.EpochsConfigWithAuction { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + l + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + return n +} + +func sovNodesCoordinatorRegistryWithAuction(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozNodesCoordinatorRegistryWithAuction(x uint64) (n int) { + return sovNodesCoordinatorRegistryWithAuction(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SerializableValidator) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SerializableValidator{`, + `PubKey:` + fmt.Sprintf("%v", this.PubKey) + `,`, + `Chances:` + fmt.Sprintf("%v", this.Chances) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func (this *Validators) String() string { + if this == nil { + return "nil" + } + repeatedStringForData := "[]*SerializableValidator{" + for _, f := range this.Data { + repeatedStringForData += strings.Replace(f.String(), "SerializableValidator", "SerializableValidator", 1) + "," + } + repeatedStringForData += "}" + s := strings.Join([]string{`&Validators{`, + `Data:` + repeatedStringForData + `,`, + `}`, + }, "") + return s +} +func (this *EpochValidatorsWithAuction) String() string { + if this == nil { + return "nil" + } + keysForEligible := make([]string, 0, len(this.Eligible)) + for k, _ := 
range this.Eligible { + keysForEligible = append(keysForEligible, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEligible) + mapStringForEligible := "map[string]Validators{" + for _, k := range keysForEligible { + mapStringForEligible += fmt.Sprintf("%v: %v,", k, this.Eligible[k]) + } + mapStringForEligible += "}" + keysForWaiting := make([]string, 0, len(this.Waiting)) + for k, _ := range this.Waiting { + keysForWaiting = append(keysForWaiting, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWaiting) + mapStringForWaiting := "map[string]Validators{" + for _, k := range keysForWaiting { + mapStringForWaiting += fmt.Sprintf("%v: %v,", k, this.Waiting[k]) + } + mapStringForWaiting += "}" + keysForLeaving := make([]string, 0, len(this.Leaving)) + for k, _ := range this.Leaving { + keysForLeaving = append(keysForLeaving, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLeaving) + mapStringForLeaving := "map[string]Validators{" + for _, k := range keysForLeaving { + mapStringForLeaving += fmt.Sprintf("%v: %v,", k, this.Leaving[k]) + } + mapStringForLeaving += "}" + keysForShuffledOut := make([]string, 0, len(this.ShuffledOut)) + for k, _ := range this.ShuffledOut { + keysForShuffledOut = append(keysForShuffledOut, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForShuffledOut) + mapStringForShuffledOut := "map[string]Validators{" + for _, k := range keysForShuffledOut { + mapStringForShuffledOut += fmt.Sprintf("%v: %v,", k, this.ShuffledOut[k]) + } + mapStringForShuffledOut += "}" + s := strings.Join([]string{`&EpochValidatorsWithAuction{`, + `Eligible:` + mapStringForEligible + `,`, + `Waiting:` + mapStringForWaiting + `,`, + `Leaving:` + mapStringForLeaving + `,`, + `ShuffledOut:` + mapStringForShuffledOut + `,`, + `LowWaitingList:` + fmt.Sprintf("%v", this.LowWaitingList) + `,`, + `}`, + }, "") + return s +} +func (this *NodesCoordinatorRegistryWithAuction) String() string { + if this == nil { + return "nil" + } + keysForEpochsConfigWithAuction := make([]string, 0, len(this.EpochsConfigWithAuction)) + for k, _ := range this.EpochsConfigWithAuction { + keysForEpochsConfigWithAuction = append(keysForEpochsConfigWithAuction, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEpochsConfigWithAuction) + mapStringForEpochsConfigWithAuction := "map[string]*EpochValidatorsWithAuction{" + for _, k := range keysForEpochsConfigWithAuction { + mapStringForEpochsConfigWithAuction += fmt.Sprintf("%v: %v,", k, this.EpochsConfigWithAuction[k]) + } + mapStringForEpochsConfigWithAuction += "}" + s := strings.Join([]string{`&NodesCoordinatorRegistryWithAuction{`, + `CurrentEpoch:` + fmt.Sprintf("%v", this.CurrentEpoch) + `,`, + `EpochsConfigWithAuction:` + mapStringForEpochsConfigWithAuction + `,`, + `}`, + }, "") + return s +} +func valueToStringNodesCoordinatorRegistryWithAuction(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SerializableValidator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
SerializableValidator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SerializableValidator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKey = append(m.PubKey[:0], dAtA[iNdEx:postIndex]...) + if m.PubKey == nil { + m.PubKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Chances", wireType) + } + m.Chances = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Chances |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Validators) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Validators: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Validators: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data, &SerializableValidator{}) + if err := m.Data[len(m.Data)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EpochValidatorsWithAuction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EpochValidatorsWithAuction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EpochValidatorsWithAuction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Eligible", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Eligible == nil { + m.Eligible = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Eligible[mapkey] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Waiting == nil { + m.Waiting = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = 
entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Waiting[mapkey] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leaving", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leaving == nil { + m.Leaving = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Leaving[mapkey] = *mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShuffledOut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShuffledOut == nil { + m.ShuffledOut = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ShuffledOut[mapkey] = *mapvalue + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LowWaitingList", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LowWaitingList = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodesCoordinatorRegistryWithAuction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodesCoordinatorRegistryWithAuction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodesCoordinatorRegistryWithAuction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpoch", wireType) + } + m.CurrentEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentEpoch |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochsConfigWithAuction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EpochsConfigWithAuction == nil { + m.EpochsConfigWithAuction = make(map[string]*EpochValidatorsWithAuction) + } + var mapkey string + var mapvalue *EpochValidatorsWithAuction + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &EpochValidatorsWithAuction{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.EpochsConfigWithAuction[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipNodesCoordinatorRegistryWithAuction(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupNodesCoordinatorRegistryWithAuction + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthNodesCoordinatorRegistryWithAuction = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowNodesCoordinatorRegistryWithAuction = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupNodesCoordinatorRegistryWithAuction = fmt.Errorf("proto: unexpected end of group") +) diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.proto b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.proto new file mode 100644 index 00000000000..d4b3ef455bd --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.proto @@ 
-0,0 +1,31 @@ +syntax = "proto3"; + +package proto; + +option go_package = "nodesCoordinator"; +option (gogoproto.stable_marshaler_all) = true; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message SerializableValidator { + bytes PubKey = 1 [(gogoproto.jsontag) = "pubKey"]; + uint32 Chances = 2 [(gogoproto.jsontag) = "chances"]; + uint32 Index = 3 [(gogoproto.jsontag) = "index"]; +} + +message Validators { + repeated SerializableValidator Data = 1; +} + +message EpochValidatorsWithAuction { + map<string, Validators> Eligible = 1 [(gogoproto.nullable) = false]; + map<string, Validators> Waiting = 2 [(gogoproto.nullable) = false]; + map<string, Validators> Leaving = 3 [(gogoproto.nullable) = false]; + map<string, Validators> ShuffledOut = 4 [(gogoproto.nullable) = false]; + bool LowWaitingList = 5; +} + +message NodesCoordinatorRegistryWithAuction { + uint32 CurrentEpoch = 1; + map<string, EpochValidatorsWithAuction> EpochsConfigWithAuction = 2; +} diff --git a/sharding/nodesCoordinator/shardingArgs.go b/sharding/nodesCoordinator/shardingArgs.go index 71a6b2684c3..67c542952d7 100644 --- a/sharding/nodesCoordinator/shardingArgs.go +++ b/sharding/nodesCoordinator/shardingArgs.go @@ -11,26 +11,27 @@ import ( // ArgNodesCoordinator holds all dependencies required by the nodes coordinator in order to create new instances type ArgNodesCoordinator struct { - ShardConsensusGroupSize int - MetaConsensusGroupSize int - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - Shuffler NodesShuffler - EpochStartNotifier EpochStartEventNotifier - BootStorer storage.Storer - ShardIDAsObserver uint32 - NbShards uint32 - EligibleNodes map[uint32][]Validator - WaitingNodes map[uint32][]Validator - SelfPublicKey []byte - Epoch uint32 - StartEpoch uint32 - ConsensusGroupCache Cacher - ShuffledOutHandler ShuffledOutHandler - ChanStopNode chan endProcess.ArgEndProcess - NodeTypeProvider NodeTypeProviderHandler - IsFullArchive bool - EnableEpochsHandler common.EnableEpochsHandler - ValidatorInfoCacher epochStart.ValidatorInfoCacher - GenesisNodesSetupHandler GenesisNodesSetupHandler + ShardConsensusGroupSize int + MetaConsensusGroupSize int + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + Shuffler NodesShuffler + EpochStartNotifier EpochStartEventNotifier + BootStorer storage.Storer + ShardIDAsObserver uint32 + NbShards uint32 + EligibleNodes map[uint32][]Validator + WaitingNodes map[uint32][]Validator + SelfPublicKey []byte + Epoch uint32 + StartEpoch uint32 + ConsensusGroupCache Cacher + ShuffledOutHandler ShuffledOutHandler + ChanStopNode chan endProcess.ArgEndProcess + NodeTypeProvider NodeTypeProviderHandler + IsFullArchive bool + EnableEpochsHandler common.EnableEpochsHandler + ValidatorInfoCacher epochStart.ValidatorInfoCacher + GenesisNodesSetupHandler GenesisNodesSetupHandler + NodesCoordinatorRegistryFactory NodesCoordinatorRegistryFactory } diff --git a/state/accounts/peerAccount.go b/state/accounts/peerAccount.go index a63b71ff040..8900edc6f1b 100644 --- a/state/accounts/peerAccount.go +++ b/state/accounts/peerAccount.go @@ -100,7 +100,12 @@ func (pa *peerAccount) SetTempRating(rating uint32) { } // SetListAndIndex will update the peer's list (eligible, waiting) and the index inside it with journal -func (pa *peerAccount) SetListAndIndex(shardID uint32, list string, index uint32) { +func (pa *peerAccount) SetListAndIndex(shardID uint32, list string, index uint32, updatePreviousValues bool) { + if updatePreviousValues { + pa.PreviousList = pa.List + pa.PreviousIndexInList = pa.IndexInList + } + pa.ShardId = shardID pa.List = list pa.IndexInList = index @@ -158,6 +163,11 
@@ func (pa *peerAccount) GetTotalValidatorSuccessRate() state.SignRate { return &pa.TotalValidatorSuccessRate } +// SetPreviousList sets validator's previous list +func (pa *peerAccount) SetPreviousList(list string) { + pa.PreviousList = list +} + // IsInterfaceNil return if there is no value under the interface func (pa *peerAccount) IsInterfaceNil() bool { return pa == nil diff --git a/state/accounts/peerAccountData.pb.go b/state/accounts/peerAccountData.pb.go index 42b84c24dda..eb0a6ef69d9 100644 --- a/state/accounts/peerAccountData.pb.go +++ b/state/accounts/peerAccountData.pb.go @@ -96,6 +96,8 @@ type PeerAccountData struct { TotalValidatorIgnoredSignaturesRate uint32 `protobuf:"varint,16,opt,name=TotalValidatorIgnoredSignaturesRate,proto3" json:"totalValidatorIgnoredSignaturesRate"` Nonce uint64 `protobuf:"varint,17,opt,name=Nonce,proto3" json:"nonce"` UnStakedEpoch uint32 `protobuf:"varint,18,opt,name=UnStakedEpoch,proto3" json:"unStakedEpoch"` + PreviousList string `protobuf:"bytes,19,opt,name=PreviousList,proto3" json:"previousList,omitempty"` + PreviousIndexInList uint32 `protobuf:"varint,20,opt,name=PreviousIndexInList,proto3" json:"previousIndexInList,omitempty"` } func (m *PeerAccountData) Reset() { *m = PeerAccountData{} } @@ -252,6 +254,20 @@ func (m *PeerAccountData) GetUnStakedEpoch() uint32 { return 0 } +func (m *PeerAccountData) GetPreviousList() string { + if m != nil { + return m.PreviousList + } + return "" +} + +func (m *PeerAccountData) GetPreviousIndexInList() uint32 { + if m != nil { + return m.PreviousIndexInList + } + return 0 +} + func init() { proto.RegisterType((*SignRate)(nil), "proto.SignRate") proto.RegisterType((*PeerAccountData)(nil), "proto.PeerAccountData") @@ -260,56 +276,59 @@ func init() { func init() { proto.RegisterFile("peerAccountData.proto", fileDescriptor_26bd0314afcce126) } var fileDescriptor_26bd0314afcce126 = []byte{ - // 774 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcf, 0x8f, 0xdb, 0x44, - 0x18, 0x8d, 0xcb, 0x26, 0xbb, 0x3b, 0x49, 0x36, 0xcd, 0xa8, 0x14, 0x67, 0x81, 0x99, 0x90, 0x0a, - 0xc8, 0x81, 0x24, 0xe2, 0x87, 0xc4, 0x81, 0x53, 0x5c, 0x5a, 0x29, 0xb0, 0x54, 0xab, 0x49, 0x41, - 0x08, 0x24, 0xa4, 0xc9, 0x78, 0xea, 0x98, 0xda, 0x9e, 0x68, 0x66, 0xbc, 0xec, 0xde, 0xb8, 0x72, - 0xeb, 0x9f, 0x81, 0xf8, 0x4b, 0x7a, 0xdc, 0xe3, 0x9e, 0x0c, 0xeb, 0xbd, 0x20, 0x9f, 0xfa, 0x27, - 0x20, 0x4f, 0xdc, 0x34, 0x69, 0x9d, 0x5d, 0x4e, 0x89, 0xdf, 0x7b, 0xdf, 0xfb, 0xbe, 0x79, 0xfe, - 0xc6, 0xe0, 0xed, 0x05, 0xe7, 0x72, 0xcc, 0x98, 0x88, 0x23, 0xfd, 0x35, 0xd5, 0x74, 0xb8, 0x90, - 0x42, 0x0b, 0x58, 0x35, 0x3f, 0x87, 0x03, 0xcf, 0xd7, 0xf3, 0x78, 0x36, 0x64, 0x22, 0x1c, 0x79, - 0xc2, 0x13, 0x23, 0x03, 0xcf, 0xe2, 0x27, 0xe6, 0xc9, 0x3c, 0x98, 0x7f, 0xcb, 0xaa, 0xde, 0x37, - 0x60, 0x6f, 0xea, 0x7b, 0x11, 0xa1, 0x9a, 0x43, 0x04, 0xc0, 0xa3, 0x38, 0x9c, 0xc6, 0x8c, 0x71, - 0xa5, 0x6c, 0xab, 0x6b, 0xf5, 0x9b, 0x64, 0x0d, 0x29, 0xf8, 0x87, 0xd4, 0x0f, 0x62, 0xc9, 0xed, - 0x5b, 0x2b, 0xbe, 0x40, 0x7a, 0x7f, 0xd4, 0x41, 0xeb, 0x78, 0x73, 0x36, 0xf8, 0x05, 0x68, 0x38, - 0x47, 0xd3, 0xe3, 0x78, 0x16, 0xf8, 0xec, 0x5b, 0x7e, 0x66, 0x5c, 0x1b, 0xce, 0xed, 0x2c, 0xc1, - 0x8d, 0x59, 0xa0, 0x56, 0x38, 0xd9, 0x50, 0xc1, 0x31, 0x68, 0x12, 0xfe, 0x1b, 0x95, 0xee, 0xd8, - 0x75, 0x65, 0x3e, 0xcc, 0x2d, 0x53, 0xf6, 0x6e, 0x96, 0xe0, 0x77, 0xe4, 0x3a, 0xf1, 0x89, 0x08, - 0x7d, 0xcd, 0xc3, 0x85, 0x3e, 0x23, 0x9b, 0x15, 0xf0, 0x43, 0xb0, 0x3b, 0x9d, 0x53, 0xe9, 0x4e, - 0x5c, 0xfb, 0xad, 0x7c, 0x52, 0xa7, 
0x9e, 0x25, 0x78, 0x57, 0x2d, 0x21, 0xf2, 0x92, 0x83, 0x14, - 0xdc, 0xf9, 0x81, 0x06, 0xbe, 0x4b, 0xb5, 0x90, 0xc5, 0x39, 0xf3, 0x2c, 0xec, 0x9d, 0xae, 0xd5, - 0xaf, 0x7f, 0xd6, 0x5a, 0xa6, 0x34, 0x7c, 0x19, 0x91, 0xf3, 0xde, 0xf3, 0x04, 0x57, 0xb2, 0x04, - 0xdf, 0x39, 0x29, 0x29, 0x22, 0xa5, 0x56, 0xf0, 0x47, 0xd0, 0x3e, 0xe2, 0xd4, 0xe5, 0x1b, 0xfe, - 0xd5, 0x72, 0xff, 0x4e, 0xe1, 0xdf, 0x0e, 0x5e, 0xaf, 0x20, 0x6f, 0x9a, 0xc0, 0x5f, 0x01, 0x5a, - 0x75, 0x9c, 0x78, 0x91, 0x90, 0xdc, 0xcd, 0x9d, 0xa8, 0x8e, 0x25, 0x5f, 0xb6, 0xa9, 0x99, 0xa3, - 0xf7, 0xb2, 0x04, 0xa3, 0x93, 0x6b, 0x95, 0xe4, 0x06, 0x27, 0xd8, 0x03, 0x35, 0x42, 0xb5, 0x1f, - 0x79, 0xf6, 0xae, 0xf1, 0x04, 0x59, 0x82, 0x6b, 0xd2, 0x20, 0xa4, 0x60, 0xe0, 0x10, 0x80, 0xc7, - 0x3c, 0x5c, 0x14, 0xba, 0x3d, 0xa3, 0x3b, 0xc8, 0x12, 0x0c, 0xf4, 0x0a, 0x25, 0x6b, 0x0a, 0xf8, - 0xcc, 0x02, 0xad, 0x31, 0x63, 0x71, 0x18, 0x07, 0x54, 0x73, 0xf7, 0x21, 0xe7, 0xca, 0xde, 0x37, - 0x6f, 0xfa, 0x49, 0x96, 0xe0, 0x0e, 0xdd, 0xa4, 0x5e, 0xbd, 0xeb, 0xbf, 0xfe, 0xc6, 0x0f, 0x42, - 0xaa, 0xe7, 0xa3, 0x99, 0xef, 0x0d, 0x27, 0x91, 0xfe, 0x6a, 0x6d, 0xe7, 0xc3, 0x38, 0xd0, 0xfe, - 0x09, 0x97, 0xea, 0x74, 0x14, 0x9e, 0x0e, 0xd8, 0x9c, 0xfa, 0xd1, 0x80, 0x09, 0xc9, 0x07, 0x9e, - 0x18, 0xb9, 0xf9, 0x6d, 0x71, 0x7c, 0x6f, 0x12, 0xe9, 0xfb, 0x54, 0x69, 0x2e, 0xc9, 0xeb, 0xed, - 0xe1, 0x2f, 0xe0, 0x30, 0xdf, 0x78, 0x1e, 0x70, 0xa6, 0xb9, 0x3b, 0x89, 0x8a, 0xb8, 0x9d, 0x40, - 0xb0, 0xa7, 0xca, 0x06, 0xe6, 0x48, 0x28, 0x4b, 0xf0, 0x61, 0xb4, 0x55, 0x45, 0xae, 0x71, 0x80, - 0x9f, 0x82, 0xfa, 0x24, 0x72, 0xf9, 0xe9, 0x24, 0x3a, 0xf2, 0x95, 0xb6, 0xeb, 0xc6, 0xb0, 0x95, - 0x25, 0xb8, 0xee, 0xbf, 0x82, 0xc9, 0xba, 0x06, 0x7e, 0x04, 0x76, 0x8c, 0xb6, 0xd1, 0xb5, 0xfa, - 0xfb, 0x0e, 0xcc, 0x12, 0x7c, 0x10, 0xf8, 0x4a, 0xaf, 0xad, 0xbe, 0xe1, 0xe1, 0xcf, 0xa0, 0x73, - 0x5f, 0x44, 0x8a, 0xb3, 0x38, 0x0f, 0xe0, 0x58, 0x8a, 0x85, 0x50, 0x5c, 0x7e, 0xe7, 0x2b, 0xc5, - 0x95, 0xdd, 0x34, 0x8d, 0xde, 0xcf, 0x63, 0x65, 0xdb, 0x44, 0x64, 0x7b, 0x3d, 0x5c, 0x80, 0xce, - 0x63, 0xa1, 0x69, 0x50, 0x7a, 0x59, 0x0e, 0xca, 0x97, 0xf9, 0x83, 0x62, 0x99, 0x3b, 0x7a, 0x5b, - 0x25, 0xd9, 0x6e, 0x0a, 0x3d, 0x70, 0xd7, 0x90, 0x6f, 0xde, 0x9d, 0x56, 0x79, 0x3b, 0x54, 0xb4, - 0xbb, 0xab, 0x4b, 0xcb, 0xc8, 0x16, 0x3b, 0x78, 0x06, 0xee, 0x6d, 0x4e, 0x51, 0x7e, 0x95, 0x6e, - 0x9b, 0x04, 0x3f, 0xce, 0x12, 0x7c, 0x4f, 0xdf, 0x2c, 0x27, 0xff, 0xc7, 0x13, 0x62, 0x50, 0x7d, - 0x24, 0x22, 0xc6, 0xed, 0x76, 0xd7, 0xea, 0xef, 0x38, 0xfb, 0x59, 0x82, 0xab, 0x51, 0x0e, 0x90, - 0x25, 0x0e, 0xbf, 0x04, 0xcd, 0xef, 0xa3, 0xa9, 0xa6, 0x4f, 0xb9, 0xfb, 0x60, 0x21, 0xd8, 0xdc, - 0x86, 0x66, 0x8a, 0x76, 0x96, 0xe0, 0x66, 0xbc, 0x4e, 0x90, 0x4d, 0x9d, 0xe3, 0x9c, 0x5f, 0xa2, - 0xca, 0xc5, 0x25, 0xaa, 0xbc, 0xb8, 0x44, 0xd6, 0xef, 0x29, 0xb2, 0xfe, 0x4c, 0x91, 0xf5, 0x3c, - 0x45, 0xd6, 0x79, 0x8a, 0xac, 0x8b, 0x14, 0x59, 0xff, 0xa4, 0xc8, 0xfa, 0x37, 0x45, 0x95, 0x17, - 0x29, 0xb2, 0x9e, 0x5d, 0xa1, 0xca, 0xf9, 0x15, 0xaa, 0x5c, 0x5c, 0xa1, 0xca, 0x4f, 0x7b, 0x74, - 0xf9, 0xf9, 0x56, 0xb3, 0x9a, 0x09, 0xf8, 0xf3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x7c, 0xff, - 0x1c, 0x23, 0x71, 0x06, 0x00, 0x00, + // 822 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4f, 0x6f, 0xdb, 0x36, + 0x1c, 0xb5, 0xba, 0xfc, 0xa5, 0xed, 0xb8, 0x61, 0xb3, 0x4e, 0xce, 0x56, 0x32, 0x4d, 0xb1, 0x2d, + 0x87, 0xc5, 0xc6, 0xfe, 0x00, 0x3b, 0x0c, 0x18, 0x10, 0x75, 0x2d, 0xe0, 0x2d, 0x2b, 0x02, 0xba, + 0x1b, 0x86, 0x0d, 0x18, 0x40, 0x4b, 0xac, 0xcc, 0x55, 0x12, 0x05, 0x92, 
0xca, 0x92, 0xdb, 0x3e, + 0x42, 0x3f, 0xc1, 0xce, 0xc3, 0x3e, 0x49, 0x8f, 0x39, 0xe6, 0xc4, 0x2d, 0xce, 0x65, 0xd0, 0xa9, + 0x1f, 0x61, 0x10, 0xad, 0xb8, 0x72, 0x23, 0xb7, 0x3d, 0xd9, 0x7c, 0xef, 0xfd, 0xde, 0x8f, 0xfc, + 0xf1, 0x11, 0x02, 0xef, 0xa6, 0x8c, 0xc9, 0x03, 0xdf, 0x17, 0x59, 0xa2, 0xbf, 0xa1, 0x9a, 0xf6, + 0x52, 0x29, 0xb4, 0x80, 0xcb, 0xf6, 0x67, 0x7b, 0x3f, 0xe4, 0x7a, 0x9c, 0x8d, 0x7a, 0xbe, 0x88, + 0xfb, 0xa1, 0x08, 0x45, 0xdf, 0xc2, 0xa3, 0xec, 0x89, 0x5d, 0xd9, 0x85, 0xfd, 0x37, 0xad, 0xda, + 0xfd, 0x16, 0xac, 0x0d, 0x79, 0x98, 0x10, 0xaa, 0x19, 0x44, 0x00, 0x3c, 0xca, 0xe2, 0x61, 0xe6, + 0xfb, 0x4c, 0x29, 0xd7, 0xd9, 0x71, 0xf6, 0xda, 0xa4, 0x82, 0x94, 0xfc, 0x43, 0xca, 0xa3, 0x4c, + 0x32, 0xf7, 0xc6, 0x8c, 0x2f, 0x91, 0xdd, 0x3f, 0x5b, 0xa0, 0x73, 0x34, 0xbf, 0x37, 0xf8, 0x05, + 0x68, 0x79, 0x87, 0xc3, 0xa3, 0x6c, 0x14, 0x71, 0xff, 0x3b, 0x76, 0x6a, 0x5d, 0x5b, 0xde, 0xcd, + 0xdc, 0xe0, 0xd6, 0x28, 0x52, 0x33, 0x9c, 0xcc, 0xa9, 0xe0, 0x01, 0x68, 0x13, 0xf6, 0x3b, 0x95, + 0xc1, 0x41, 0x10, 0xc8, 0x62, 0x33, 0x37, 0x6c, 0xd9, 0xfb, 0xb9, 0xc1, 0xef, 0xc9, 0x2a, 0xf1, + 0x89, 0x88, 0xb9, 0x66, 0x71, 0xaa, 0x4f, 0xc9, 0x7c, 0x05, 0xfc, 0x10, 0xac, 0x0e, 0xc7, 0x54, + 0x06, 0x83, 0xc0, 0x7d, 0xa7, 0xd8, 0xa9, 0xd7, 0xcc, 0x0d, 0x5e, 0x55, 0x53, 0x88, 0x5c, 0x71, + 0x90, 0x82, 0xad, 0x1f, 0x69, 0xc4, 0x03, 0xaa, 0x85, 0x2c, 0xcf, 0x59, 0xcc, 0xc2, 0x5d, 0xda, + 0x71, 0xf6, 0x9a, 0x9f, 0x75, 0xa6, 0x53, 0xea, 0x5d, 0x8d, 0xc8, 0xfb, 0xe0, 0xb9, 0xc1, 0x8d, + 0xdc, 0xe0, 0xad, 0xe3, 0x9a, 0x22, 0x52, 0x6b, 0x05, 0x7f, 0x02, 0x9b, 0x87, 0x8c, 0x06, 0x6c, + 0xce, 0x7f, 0xb9, 0xde, 0xbf, 0x5b, 0xfa, 0x6f, 0x46, 0xaf, 0x56, 0x90, 0xeb, 0x26, 0xf0, 0x37, + 0x80, 0x66, 0x1d, 0x07, 0x61, 0x22, 0x24, 0x0b, 0x0a, 0x27, 0xaa, 0x33, 0xc9, 0xa6, 0x6d, 0x56, + 0xec, 0xd1, 0x77, 0x73, 0x83, 0xd1, 0xf1, 0x6b, 0x95, 0xe4, 0x0d, 0x4e, 0x70, 0x17, 0xac, 0x10, + 0xaa, 0x79, 0x12, 0xba, 0xab, 0xd6, 0x13, 0xe4, 0x06, 0xaf, 0x48, 0x8b, 0x90, 0x92, 0x81, 0x3d, + 0x00, 0x1e, 0xb3, 0x38, 0x2d, 0x75, 0x6b, 0x56, 0xb7, 0x91, 0x1b, 0x0c, 0xf4, 0x0c, 0x25, 0x15, + 0x05, 0x7c, 0xe6, 0x80, 0xce, 0x81, 0xef, 0x67, 0x71, 0x16, 0x51, 0xcd, 0x82, 0x87, 0x8c, 0x29, + 0x77, 0xdd, 0xde, 0xf4, 0x93, 0xdc, 0xe0, 0x2e, 0x9d, 0xa7, 0x5e, 0xde, 0xf5, 0xdf, 0xff, 0xe0, + 0x07, 0x31, 0xd5, 0xe3, 0xfe, 0x88, 0x87, 0xbd, 0x41, 0xa2, 0xbf, 0xaa, 0x64, 0x3e, 0xce, 0x22, + 0xcd, 0x8f, 0x99, 0x54, 0x27, 0xfd, 0xf8, 0x64, 0xdf, 0x1f, 0x53, 0x9e, 0xec, 0xfb, 0x42, 0xb2, + 0xfd, 0x50, 0xf4, 0x83, 0xe2, 0xb5, 0x78, 0x3c, 0x1c, 0x24, 0xfa, 0x3e, 0x55, 0x9a, 0x49, 0xf2, + 0x6a, 0x7b, 0xf8, 0x2b, 0xd8, 0x2e, 0x12, 0xcf, 0x22, 0xe6, 0x6b, 0x16, 0x0c, 0x92, 0x72, 0xdc, + 0x5e, 0x24, 0xfc, 0xa7, 0xca, 0x05, 0xf6, 0x48, 0x28, 0x37, 0x78, 0x3b, 0x59, 0xa8, 0x22, 0xaf, + 0x71, 0x80, 0x9f, 0x82, 0xe6, 0x20, 0x09, 0xd8, 0xc9, 0x20, 0x39, 0xe4, 0x4a, 0xbb, 0x4d, 0x6b, + 0xd8, 0xc9, 0x0d, 0x6e, 0xf2, 0x97, 0x30, 0xa9, 0x6a, 0xe0, 0x47, 0x60, 0xc9, 0x6a, 0x5b, 0x3b, + 0xce, 0xde, 0xba, 0x07, 0x73, 0x83, 0x37, 0x22, 0xae, 0x74, 0x25, 0xfa, 0x96, 0x87, 0xbf, 0x80, + 0xee, 0x7d, 0x91, 0x28, 0xe6, 0x67, 0xc5, 0x00, 0x8e, 0xa4, 0x48, 0x85, 0x62, 0xf2, 0x7b, 0xae, + 0x14, 0x53, 0x6e, 0xdb, 0x36, 0xba, 0x53, 0x8c, 0xd5, 0x5f, 0x24, 0x22, 0x8b, 0xeb, 0x61, 0x0a, + 0xba, 0x8f, 0x85, 0xa6, 0x51, 0xed, 0x63, 0xd9, 0xa8, 0x0f, 0xf3, 0xdd, 0x32, 0xcc, 0x5d, 0xbd, + 0xa8, 0x92, 0x2c, 0x36, 0x85, 0x21, 0xb8, 0x6d, 0xc9, 0xeb, 0x6f, 0xa7, 0x53, 0xdf, 0x0e, 0x95, + 0xed, 0x6e, 0xeb, 0xda, 0x32, 0xb2, 0xc0, 0x0e, 0x9e, 0x82, 0x7b, 0xf3, 0xbb, 0xa8, 0x7f, 0x4a, + 
0x37, 0xed, 0x04, 0x3f, 0xce, 0x0d, 0xbe, 0xa7, 0xdf, 0x2c, 0x27, 0x6f, 0xe3, 0x09, 0x31, 0x58, + 0x7e, 0x24, 0x12, 0x9f, 0xb9, 0x9b, 0x3b, 0xce, 0xde, 0x92, 0xb7, 0x9e, 0x1b, 0xbc, 0x9c, 0x14, + 0x00, 0x99, 0xe2, 0xf0, 0x4b, 0xd0, 0xfe, 0x21, 0x19, 0x6a, 0xfa, 0x94, 0x05, 0x0f, 0x52, 0xe1, + 0x8f, 0x5d, 0x68, 0x77, 0xb1, 0x99, 0x1b, 0xdc, 0xce, 0xaa, 0x04, 0x99, 0xd7, 0xc1, 0xaf, 0x41, + 0xeb, 0x48, 0xb2, 0x63, 0x2e, 0x32, 0x65, 0xc3, 0x73, 0xcb, 0x86, 0x67, 0xbb, 0x18, 0x4f, 0x5a, + 0xc1, 0x2b, 0x21, 0x9a, 0xd3, 0xc3, 0x21, 0xb8, 0x75, 0xb5, 0xae, 0xe6, 0x75, 0xcb, 0xb6, 0xbf, + 0x9b, 0x1b, 0x7c, 0x27, 0xbd, 0x4e, 0x57, 0xdc, 0xea, 0xaa, 0x3d, 0xef, 0xec, 0x02, 0x35, 0xce, + 0x2f, 0x50, 0xe3, 0xc5, 0x05, 0x72, 0xfe, 0x98, 0x20, 0xe7, 0xaf, 0x09, 0x72, 0x9e, 0x4f, 0x90, + 0x73, 0x36, 0x41, 0xce, 0xf9, 0x04, 0x39, 0xff, 0x4e, 0x90, 0xf3, 0xdf, 0x04, 0x35, 0x5e, 0x4c, + 0x90, 0xf3, 0xec, 0x12, 0x35, 0xce, 0x2e, 0x51, 0xe3, 0xfc, 0x12, 0x35, 0x7e, 0x5e, 0xa3, 0xd3, + 0x6f, 0x8a, 0x1a, 0xad, 0xd8, 0x5b, 0xff, 0xfc, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x70, 0x40, + 0xd1, 0x9b, 0x06, 0x07, 0x00, 0x00, } func (this *SignRate) Equal(that interface{}) bool { @@ -415,6 +434,12 @@ func (this *PeerAccountData) Equal(that interface{}) bool { if this.UnStakedEpoch != that1.UnStakedEpoch { return false } + if this.PreviousList != that1.PreviousList { + return false + } + if this.PreviousIndexInList != that1.PreviousIndexInList { + return false + } return true } func (this *SignRate) GoString() string { @@ -432,7 +457,7 @@ func (this *PeerAccountData) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 22) + s := make([]string, 0, 24) s = append(s, "&accounts.PeerAccountData{") s = append(s, "BLSPublicKey: "+fmt.Sprintf("%#v", this.BLSPublicKey)+",\n") s = append(s, "RewardAddress: "+fmt.Sprintf("%#v", this.RewardAddress)+",\n") @@ -452,6 +477,8 @@ func (this *PeerAccountData) GoString() string { s = append(s, "TotalValidatorIgnoredSignaturesRate: "+fmt.Sprintf("%#v", this.TotalValidatorIgnoredSignaturesRate)+",\n") s = append(s, "Nonce: "+fmt.Sprintf("%#v", this.Nonce)+",\n") s = append(s, "UnStakedEpoch: "+fmt.Sprintf("%#v", this.UnStakedEpoch)+",\n") + s = append(s, "PreviousList: "+fmt.Sprintf("%#v", this.PreviousList)+",\n") + s = append(s, "PreviousIndexInList: "+fmt.Sprintf("%#v", this.PreviousIndexInList)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -516,6 +543,22 @@ func (m *PeerAccountData) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreviousIndexInList != 0 { + i = encodeVarintPeerAccountData(dAtA, i, uint64(m.PreviousIndexInList)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if len(m.PreviousList) > 0 { + i -= len(m.PreviousList) + copy(dAtA[i:], m.PreviousList) + i = encodeVarintPeerAccountData(dAtA, i, uint64(len(m.PreviousList))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } if m.UnStakedEpoch != 0 { i = encodeVarintPeerAccountData(dAtA, i, uint64(m.UnStakedEpoch)) i-- @@ -734,6 +777,13 @@ func (m *PeerAccountData) Size() (n int) { if m.UnStakedEpoch != 0 { n += 2 + sovPeerAccountData(uint64(m.UnStakedEpoch)) } + l = len(m.PreviousList) + if l > 0 { + n += 2 + l + sovPeerAccountData(uint64(l)) + } + if m.PreviousIndexInList != 0 { + n += 2 + sovPeerAccountData(uint64(m.PreviousIndexInList)) + } return n } @@ -777,6 +827,8 @@ func (this *PeerAccountData) String() string { `TotalValidatorIgnoredSignaturesRate:` + fmt.Sprintf("%v", this.TotalValidatorIgnoredSignaturesRate) + `,`, `Nonce:` + fmt.Sprintf("%v", 
this.Nonce) + `,`, `UnStakedEpoch:` + fmt.Sprintf("%v", this.UnStakedEpoch) + `,`, + `PreviousList:` + fmt.Sprintf("%v", this.PreviousList) + `,`, + `PreviousIndexInList:` + fmt.Sprintf("%v", this.PreviousIndexInList) + `,`, `}`, }, "") return s @@ -1369,6 +1421,57 @@ func (m *PeerAccountData) Unmarshal(dAtA []byte) error { break } } + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerAccountData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPeerAccountData + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPeerAccountData + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousList = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousIndexInList", wireType) + } + m.PreviousIndexInList = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerAccountData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreviousIndexInList |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipPeerAccountData(dAtA[iNdEx:]) diff --git a/state/accounts/peerAccountData.proto b/state/accounts/peerAccountData.proto index d4cc3292c38..1a3e99a295f 100644 --- a/state/accounts/peerAccountData.proto +++ b/state/accounts/peerAccountData.proto @@ -33,4 +33,6 @@ message PeerAccountData { uint32 TotalValidatorIgnoredSignaturesRate = 16 [(gogoproto.jsontag) = "totalValidatorIgnoredSignaturesRate"]; uint64 Nonce = 17 [(gogoproto.jsontag) = "nonce"]; uint32 UnStakedEpoch = 18 [(gogoproto.jsontag) = "unStakedEpoch"]; + string PreviousList = 19 [(gogoproto.jsontag) = "previousList,omitempty"]; + uint32 PreviousIndexInList = 20 [(gogoproto.jsontag) = "previousIndexInList,omitempty"]; } diff --git a/state/accountsDB.go b/state/accountsDB.go index 06fb88eac3a..4274e5138d6 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -431,7 +431,8 @@ func (adb *AccountsDB) loadDataTrieConcurrentSafe(accountHandler baseAccountHand return nil } - dataTrie, err := mainTrie.Recreate(accountHandler.GetRootHash()) + rootHashHolder := holders.NewDefaultRootHashesHolder(accountHandler.GetRootHash()) + dataTrie, err := mainTrie.Recreate(rootHashHolder) if err != nil { return fmt.Errorf("trie was not found for hash, rootHash = %s, err = %w", hex.EncodeToString(accountHandler.GetRootHash()), err) } @@ -556,7 +557,8 @@ func (adb *AccountsDB) removeDataTrie(baseAcc baseAccountHandler) error { return nil } - dataTrie, err := adb.mainTrie.Recreate(rootHash) + rootHashHolder := holders.NewDefaultRootHashesHolder(rootHash) + dataTrie, err := adb.mainTrie.Recreate(rootHashHolder) if err != nil { return err } @@ -745,7 +747,7 @@ func (adb *AccountsDB) RevertToSnapshot(snapshot int) error { if snapshot == 0 { log.Trace("revert snapshot to adb.lastRootHash", "hash", adb.lastRootHash) - return adb.recreateTrie(holders.NewRootHashHolder(adb.lastRootHash, core.OptionalUint32{})) + return adb.recreateTrie(holders.NewDefaultRootHashesHolder(adb.lastRootHash)) } for i := len(adb.entries) - 1; i >= snapshot; i-- { @@ -785,6 +787,7 @@ func 
(adb *AccountsDB) CommitInEpoch(currentEpoch uint32, epochToCommit uint32) adb.mutOp.Lock() defer func() { adb.mainTrie.GetStorageManager().SetEpochForPutOperation(currentEpoch) + adb.mainTrie.GetStorageManager().GetStateStatsHandler().Reset() adb.mutOp.Unlock() adb.loadCodeMeasurements.resetAndPrint() }() @@ -910,13 +913,8 @@ func (adb *AccountsDB) RootHash() ([]byte, error) { return rootHash, err } -// RecreateTrie is used to reload the trie based on an existing rootHash -func (adb *AccountsDB) RecreateTrie(rootHash []byte) error { - return adb.RecreateTrieFromEpoch(holders.NewRootHashHolder(rootHash, core.OptionalUint32{})) -} - -// RecreateTrieFromEpoch is used to reload the trie based on the provided options -func (adb *AccountsDB) RecreateTrieFromEpoch(options common.RootHashHolder) error { +// RecreateTrie is used to reload the trie based on the provided options +func (adb *AccountsDB) RecreateTrie(options common.RootHashHolder) error { adb.mutOp.Lock() defer adb.mutOp.Unlock() @@ -938,7 +936,7 @@ func (adb *AccountsDB) recreateTrie(options common.RootHashHolder) error { adb.obsoleteDataTrieHashes = make(map[string][][]byte) adb.dataTries.Reset() adb.entries = make([]JournalEntry, 0) - newTrie, err := adb.mainTrie.RecreateFromEpoch(options) + newTrie, err := adb.mainTrie.Recreate(options) if err != nil { return err } @@ -984,7 +982,8 @@ func (adb *AccountsDB) RecreateAllTries(rootHash []byte) (map[string]common.Trie userAccountRootHash := userAccount.GetRootHash() if len(userAccountRootHash) > 0 { - dataTrie, errRecreate := mainTrie.Recreate(userAccountRootHash) + rootHashHolder := holders.NewDefaultRootHashesHolder(userAccountRootHash) + dataTrie, errRecreate := mainTrie.Recreate(rootHashHolder) if errRecreate != nil { return nil, errRecreate } @@ -1022,7 +1021,8 @@ func getUserAccountFromBytes(accountFactory AccountFactory, marshaller marshal.M } func (adb *AccountsDB) recreateMainTrie(rootHash []byte) (map[string]common.Trie, error) { - recreatedTrie, err := adb.getMainTrie().Recreate(rootHash) + rootHashHolder := holders.NewDefaultRootHashesHolder(rootHash) + recreatedTrie, err := adb.getMainTrie().Recreate(rootHashHolder) if err != nil { return nil, err } @@ -1035,7 +1035,8 @@ func (adb *AccountsDB) recreateMainTrie(rootHash []byte) (map[string]common.Trie // GetTrie returns the trie that has the given rootHash func (adb *AccountsDB) GetTrie(rootHash []byte) (common.Trie, error) { - return adb.getMainTrie().Recreate(rootHash) + rootHashHolder := holders.NewDefaultRootHashesHolder(rootHash) + return adb.getMainTrie().Recreate(rootHashHolder) } // Journalize adds a new object to entries list. 
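For reference, the state changes above replace RecreateTrie([]byte) and RecreateTrieFromEpoch(common.RootHashHolder) with a single RecreateTrie(options common.RootHashHolder); callers wrap a plain root hash with holders.NewDefaultRootHashesHolder, while epoch-aware callers pass holders.NewRootHashHolder. Below is a minimal, self-contained sketch of that pattern, using simplified stand-in types rather than the repository's actual interfaces:

package main

import "fmt"

// rootHashHolder is a simplified stand-in for common.RootHashHolder.
type rootHashHolder interface {
    GetRootHash() []byte
    GetEpoch() (uint32, bool)
}

// defaultHolder mimics holders.NewDefaultRootHashesHolder: a root hash with no epoch attached.
type defaultHolder struct{ rootHash []byte }

func (h defaultHolder) GetRootHash() []byte      { return h.rootHash }
func (h defaultHolder) GetEpoch() (uint32, bool) { return 0, false }

// epochHolder mimics holders.NewRootHashHolder: a root hash plus an explicit epoch.
type epochHolder struct {
    rootHash []byte
    epoch    uint32
}

func (h epochHolder) GetRootHash() []byte      { return h.rootHash }
func (h epochHolder) GetEpoch() (uint32, bool) { return h.epoch, true }

// recreateTrie stands in for AccountsDB.RecreateTrie(options common.RootHashHolder):
// a single entry point now serves both the plain and the epoch-aware recreation paths.
func recreateTrie(options rootHashHolder) error {
    if options == nil {
        return fmt.Errorf("nil root hash holder provided")
    }
    if epoch, hasEpoch := options.GetEpoch(); hasEpoch {
        fmt.Printf("recreating trie %x from epoch %d\n", options.GetRootHash(), epoch)
        return nil
    }
    fmt.Printf("recreating trie %x\n", options.GetRootHash())
    return nil
}

func main() {
    // Former RecreateTrie([]byte) callers now wrap the raw root hash in a holder.
    _ = recreateTrie(defaultHolder{rootHash: []byte("root hash")})
    // Former RecreateTrieFromEpoch callers pass an epoch-aware holder instead.
    _ = recreateTrie(epochHolder{rootHash: []byte("root hash"), epoch: 37})
}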
diff --git a/state/accountsDBApi.go b/state/accountsDBApi.go index af8fdd5a763..9d0a67ef9fa 100644 --- a/state/accountsDBApi.go +++ b/state/accountsDBApi.go @@ -63,7 +63,8 @@ func (accountsDB *accountsDBApi) doRecreateTrieWithBlockInfo(newBlockInfo common return currentBlockInfo, nil } - err := accountsDB.innerAccountsAdapter.RecreateTrie(newBlockInfo.GetRootHash()) + rootHashHolder := holders.NewDefaultRootHashesHolder(newBlockInfo.GetRootHash()) + err := accountsDB.innerAccountsAdapter.RecreateTrie(rootHashHolder) if err != nil { accountsDB.blockInfo = nil return nil, err @@ -164,15 +165,29 @@ func (accountsDB *accountsDBApi) RootHash() ([]byte, error) { return blockInfo.GetRootHash(), nil } -// RecreateTrie is used to reload the trie based on an existing rootHash -func (accountsDB *accountsDBApi) RecreateTrie(rootHash []byte) error { - _, err := accountsDB.doRecreateTrieWithBlockInfo(holders.NewBlockInfo([]byte{}, 0, rootHash)) - return err -} +// RecreateTrie is used to reload the trie based on the provided options +func (accountsDB *accountsDBApi) RecreateTrie(options common.RootHashHolder) error { + accountsDB.mutRecreatedTrieBlockInfo.Lock() + defer accountsDB.mutRecreatedTrieBlockInfo.Unlock() -// RecreateTrieFromEpoch is a not permitted operation in this implementation and thus, will return an error -func (accountsDB *accountsDBApi) RecreateTrieFromEpoch(_ common.RootHashHolder) error { - return ErrOperationNotPermitted + if check.IfNil(options) { + return ErrNilRootHashHolder + } + + newBlockInfo := holders.NewBlockInfo([]byte{}, 0, options.GetRootHash()) + if newBlockInfo.Equal(accountsDB.blockInfo) { + return nil + } + + err := accountsDB.innerAccountsAdapter.RecreateTrie(options) + if err != nil { + accountsDB.blockInfo = nil + return err + } + + accountsDB.blockInfo = newBlockInfo + + return nil } // PruneTrie is a not permitted operation in this implementation and thus, does nothing diff --git a/state/accountsDBApiWithHistory.go b/state/accountsDBApiWithHistory.go index 76994768f6c..b0f22bb19b1 100644 --- a/state/accountsDBApiWithHistory.go +++ b/state/accountsDBApiWithHistory.go @@ -94,12 +94,7 @@ func (accountsDB *accountsDBApiWithHistory) RootHash() ([]byte, error) { } // RecreateTrie is a not permitted operation in this implementation and thus, will return an error -func (accountsDB *accountsDBApiWithHistory) RecreateTrie(_ []byte) error { - return ErrOperationNotPermitted -} - -// RecreateTrieFromEpoch is a not permitted operation in this implementation and thus, will return an error -func (accountsDB *accountsDBApiWithHistory) RecreateTrieFromEpoch(_ common.RootHashHolder) error { +func (accountsDB *accountsDBApiWithHistory) RecreateTrie(_ common.RootHashHolder) error { return ErrOperationNotPermitted } @@ -228,7 +223,7 @@ func (accountsDB *accountsDBApiWithHistory) shouldRecreateTrieUnprotected(rootHa } func (accountsDB *accountsDBApiWithHistory) recreateTrieUnprotected(options common.RootHashHolder) error { - err := accountsDB.innerAccountsAdapter.RecreateTrieFromEpoch(options) + err := accountsDB.innerAccountsAdapter.RecreateTrie(options) if err != nil { return err } diff --git a/state/accountsDBApiWithHistory_test.go b/state/accountsDBApiWithHistory_test.go index 4d9e1a28341..199acb27a48 100644 --- a/state/accountsDBApiWithHistory_test.go +++ b/state/accountsDBApiWithHistory_test.go @@ -8,7 +8,6 @@ import ( "sync" "testing" - "github.com/multiversx/mx-chain-core-go/core" 
"github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" @@ -99,14 +98,14 @@ func TestAccountsDBApiWithHistory_NotPermittedOrNotImplementedOperationsDoNotPan func TestAccountsDBApiWithHistory_GetAccountWithBlockInfo(t *testing.T) { rootHash := []byte("rootHash") - options := holders.NewRootHashHolder(rootHash, core.OptionalUint32{}) + options := holders.NewDefaultRootHashesHolder(rootHash) arbitraryError := errors.New("arbitrary error") t.Run("recreate trie fails", func(t *testing.T) { expectedErr := errors.New("expected error") accountsAdapter := &mockState.AccountsStub{ - RecreateTrieFromEpochCalled: func(_ common.RootHashHolder) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return expectedErr }, } @@ -122,7 +121,7 @@ func TestAccountsDBApiWithHistory_GetAccountWithBlockInfo(t *testing.T) { var recreatedRootHash []byte accountsAdapter := &mockState.AccountsStub{ - RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + RecreateTrieCalled: func(options common.RootHashHolder) error { recreatedRootHash = options.GetRootHash() return nil }, @@ -147,7 +146,7 @@ func TestAccountsDBApiWithHistory_GetAccountWithBlockInfo(t *testing.T) { var recreatedRootHash []byte accountsAdapter := &mockState.AccountsStub{ - RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + RecreateTrieCalled: func(options common.RootHashHolder) error { recreatedRootHash = options.GetRootHash() return nil }, @@ -168,7 +167,7 @@ func TestAccountsDBApiWithHistory_GetAccountWithBlockInfo(t *testing.T) { var recreatedRootHash []byte accountsAdapter := &mockState.AccountsStub{ - RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + RecreateTrieCalled: func(options common.RootHashHolder) error { recreatedRootHash = options.GetRootHash() return nil }, @@ -189,13 +188,13 @@ func TestAccountsDBApiWithHistory_GetAccountWithBlockInfo(t *testing.T) { func TestAccountsDBApiWithHistory_GetCodeWithBlockInfo(t *testing.T) { contractCodeHash := []byte("codeHash") rootHash := []byte("rootHash") - options := holders.NewRootHashHolder(rootHash, core.OptionalUint32{}) + options := holders.NewDefaultRootHashesHolder(rootHash) t.Run("recreate trie fails", func(t *testing.T) { expectedErr := errors.New("expected error") accountsAdapter := &mockState.AccountsStub{ - RecreateTrieFromEpochCalled: func(_ common.RootHashHolder) error { + RecreateTrieCalled: func(_ common.RootHashHolder) error { return expectedErr }, } @@ -211,7 +210,7 @@ func TestAccountsDBApiWithHistory_GetCodeWithBlockInfo(t *testing.T) { var recreatedRootHash []byte accountsAdapter := &mockState.AccountsStub{ - RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + RecreateTrieCalled: func(options common.RootHashHolder) error { recreatedRootHash = options.GetRootHash() return nil }, @@ -249,7 +248,7 @@ func TestAccountsDBApiWithHistory_GetAccountWithBlockInfoWhenHighConcurrency(t * var dummyAccountMutex sync.RWMutex accountsAdapter := &mockState.AccountsStub{ - RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + RecreateTrieCalled: func(options common.RootHashHolder) error { rootHash := options.GetRootHash() dummyAccountMutex.Lock() @@ -283,7 +282,7 @@ func TestAccountsDBApiWithHistory_GetAccountWithBlockInfoWhenHighConcurrency(t * go func(rootHashAsInt int) { rootHashAsString := fmt.Sprintf("%d", rootHashAsInt) 
rootHash := []byte(rootHashAsString) - options := holders.NewRootHashHolder(rootHash, core.OptionalUint32{}) + options := holders.NewDefaultRootHashesHolder(rootHash) account, blockInfo, _ := accountsApiWithHistory.GetAccountWithBlockInfo([]byte("address"), options) userAccount := account.(state.UserAccountHandler) diff --git a/state/accountsDBApi_test.go b/state/accountsDBApi_test.go index aee169c4f64..35f80992d5a 100644 --- a/state/accountsDBApi_test.go +++ b/state/accountsDBApi_test.go @@ -16,7 +16,8 @@ import ( "github.com/multiversx/mx-chain-go/state/parsers" "github.com/multiversx/mx-chain-go/testscommon" mockState "github.com/multiversx/mx-chain-go/testscommon/state" - "github.com/multiversx/mx-chain-go/testscommon/trie" + testTrie "github.com/multiversx/mx-chain-go/testscommon/trie" + "github.com/multiversx/mx-chain-go/trie" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -70,7 +71,7 @@ func TestAccountsDBAPi_recreateTrieIfNecessary(t *testing.T) { t.Parallel() accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { require.Fail(t, "should have not called RecreateAllTriesCalled") return nil @@ -108,7 +109,7 @@ func TestAccountsDBAPi_recreateTrieIfNecessary(t *testing.T) { t.Parallel() accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { require.Fail(t, "should have not called RecreateAllTriesCalled") return nil @@ -125,7 +126,7 @@ func TestAccountsDBAPi_recreateTrieIfNecessary(t *testing.T) { oldRootHash := []byte("old root hash") accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { assert.Equal(t, rootHash, rootHash) return nil @@ -145,7 +146,7 @@ func TestAccountsDBAPi_recreateTrieIfNecessary(t *testing.T) { oldRootHash := []byte("old root hash") accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { assert.Equal(t, rootHash, rootHash) return expectedErr @@ -168,7 +169,7 @@ func TestAccountsDBAPi_doRecreateTrieWhenReEntranceHappened(t *testing.T) { targetRootHash := []byte("root hash") numCalled := 0 accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { numCalled++ return nil }, @@ -195,7 +196,6 @@ func TestAccountsDBApi_NotPermittedOperations(t *testing.T) { assert.Equal(t, state.ErrOperationNotPermitted, accountsApi.SaveAccount(nil)) assert.Equal(t, state.ErrOperationNotPermitted, accountsApi.RemoveAccount(nil)) assert.Equal(t, state.ErrOperationNotPermitted, accountsApi.RevertToSnapshot(0)) - assert.Equal(t, state.ErrOperationNotPermitted, accountsApi.RecreateTrieFromEpoch(nil)) buff, err := accountsApi.CommitInEpoch(0, 0) assert.Nil(t, buff) @@ -215,17 +215,52 @@ func TestAccountsDBApi_RecreateTrie(t *testing.T) { wasCalled := false accountsApi, _ := state.NewAccountsDBApi(&mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) 
error { wasCalled = true return nil }, }, createBlockInfoProviderStub(dummyRootHash)) - err := accountsApi.RecreateTrie(nil) + err := accountsApi.RecreateTrie(holders.NewDefaultRootHashesHolder([]byte{})) assert.NoError(t, err) assert.True(t, wasCalled) } +func TestAccountsDBApi_RecreateTrieFromEpoch(t *testing.T) { + t.Parallel() + + t.Run("should error if the roothash holder is nil", func(t *testing.T) { + accountsApi, _ := state.NewAccountsDBApi(&mockState.AccountsStub{ + RecreateTrieCalled: func(options common.RootHashHolder) error { + assert.Fail(t, "should have not called accountsApi.RecreateTrieFromEpochCalled") + + return nil + }, + }, createBlockInfoProviderStub(dummyRootHash)) + + err := accountsApi.RecreateTrie(nil) + assert.Equal(t, trie.ErrNilRootHashHolder, err) + }) + t.Run("should work", func(t *testing.T) { + wasCalled := false + rootHash := []byte("root hash") + epoch := core.OptionalUint32{Value: 37, HasValue: true} + accountsApi, _ := state.NewAccountsDBApi(&mockState.AccountsStub{ + RecreateTrieCalled: func(options common.RootHashHolder) error { + wasCalled = true + assert.Equal(t, rootHash, options.GetRootHash()) + assert.Equal(t, epoch, options.GetEpoch()) + return nil + }, + }, createBlockInfoProviderStub(dummyRootHash)) + + holder := holders.NewRootHashHolder(rootHash, epoch) + err := accountsApi.RecreateTrie(holder) + assert.NoError(t, err) + assert.True(t, wasCalled) + }) +} + func TestAccountsDBApi_EmptyMethodsShouldNotPanic(t *testing.T) { t.Parallel() @@ -253,7 +288,7 @@ func TestAccountsDBApi_SimpleProxyMethodsShouldWork(t *testing.T) { closeCalled := false getTrieCalled := false accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { require.Fail(t, "should have not called RecreateTrieCalled") return nil @@ -272,7 +307,7 @@ func TestAccountsDBApi_SimpleProxyMethodsShouldWork(t *testing.T) { }, GetTrieCalled: func(i []byte) (common.Trie, error) { getTrieCalled = true - return &trie.TrieStub{}, nil + return &testTrie.TrieStub{}, nil }, } @@ -300,7 +335,7 @@ func TestAccountsDBApi_GetExistingAccount(t *testing.T) { t.Parallel() accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return expectedErr }, GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { @@ -319,7 +354,7 @@ func TestAccountsDBApi_GetExistingAccount(t *testing.T) { recreateTrieCalled := false accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { recreateTrieCalled = true return nil }, @@ -344,7 +379,7 @@ func TestAccountsDBApi_GetAccountFromBytes(t *testing.T) { t.Parallel() accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return expectedErr }, GetAccountFromBytesCalled: func(address []byte, accountBytes []byte) (vmcommon.AccountHandler, error) { @@ -363,7 +398,7 @@ func TestAccountsDBApi_GetAccountFromBytes(t *testing.T) { recreateTrieCalled := false accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { recreateTrieCalled = true return nil }, @@ -388,7 +423,7 @@ func TestAccountsDBApi_LoadAccount(t *testing.T) { 
t.Parallel() accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return expectedErr }, LoadAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { @@ -407,7 +442,7 @@ func TestAccountsDBApi_LoadAccount(t *testing.T) { recreateTrieCalled := false accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { recreateTrieCalled = true return nil }, @@ -432,7 +467,7 @@ func TestAccountsDBApi_GetCode(t *testing.T) { t.Parallel() accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return expectedErr }, GetCodeCalled: func(codeHash []byte) []byte { @@ -451,7 +486,7 @@ func TestAccountsDBApi_GetCode(t *testing.T) { providedCode := []byte("code") recreateTrieCalled := false accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { recreateTrieCalled = true return nil }, @@ -475,7 +510,7 @@ func TestAccountsDBApi_GetAllLeaves(t *testing.T) { t.Parallel() accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { return expectedErr }, GetAllLeavesCalled: func(_ *common.TrieIteratorChannels, _ context.Context, _ []byte, _ common.TrieLeafParser) error { @@ -494,7 +529,7 @@ func TestAccountsDBApi_GetAllLeaves(t *testing.T) { providedChan := &common.TrieIteratorChannels{LeavesChan: make(chan core.KeyValueHolder)} recreateTrieCalled := false accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { recreateTrieCalled = true return nil }, @@ -519,13 +554,13 @@ func TestAccountsDBApi_GetAccountWithBlockInfoWhenHighConcurrency(t *testing.T) var currentBlockInfoMutex sync.RWMutex accountsAdapter := &mockState.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieCalled: func(rootHash common.RootHashHolder) error { dummyAccountMutex.Lock() defer dummyAccountMutex.Unlock() // When a trie is recreated, we "add" to it a single account, // having the balance correlated with the trie rootHash (for the sake of the test, for easier assertions). 
- dummyAccount = createDummyAccountWithBalanceBytes(rootHash) + dummyAccount = createDummyAccountWithBalanceBytes(rootHash.GetRootHash()) return nil }, GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index 529a2c4e5ee..00826b500a6 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -42,6 +42,7 @@ import ( trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-vm-common-go/dataTrieMigrator" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -109,17 +110,18 @@ func generateAddressAccountAccountsDB(trie common.Trie) ([]byte, *stateMock.Acco } func getDefaultTrieAndAccountsDb() (common.Trie, *state.AccountsDB) { - adb, tr, _ := getDefaultStateComponents(testscommon.NewSnapshotPruningStorerMock()) + adb, tr, _ := getDefaultStateComponents(testscommon.NewSnapshotPruningStorerMock(), &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) return tr, adb } func getDefaultTrieAndAccountsDbWithCustomDB(db common.BaseStorer) (common.Trie, *state.AccountsDB) { - adb, tr, _ := getDefaultStateComponents(db) + adb, tr, _ := getDefaultStateComponents(db, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}) return tr, adb } func getDefaultStateComponents( db common.BaseStorer, + enableEpochsHandler common.EnableEpochsHandler, ) (*state.AccountsDB, common.Trie, common.StorageManager) { generalCfg := config.TrieStorageManagerConfig{ PruningBufferLen: 1000, @@ -132,7 +134,7 @@ func getDefaultStateComponents( args := storage.GetStorageManagerArgs() args.MainStorer = db trieStorage, _ := trie.NewTrieStorageManager(args) - tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 5) + tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, enableEpochsHandler, 5) ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ RootHashesSize: 100, HashesSize: 10000, @@ -142,7 +144,7 @@ func getDefaultStateComponents( argsAccCreator := factory.ArgsAccountCreator{ Hasher: hasher, Marshaller: marshaller, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandler, } accCreator, _ := factory.NewAccountCreator(argsAccCreator) @@ -527,7 +529,7 @@ func TestAccountsDB_LoadAccountExistingShouldLoadDataTrie(t *testing.T) { } return nil, 0, nil }, - RecreateCalled: func(root []byte) (d common.Trie, err error) { + RecreateCalled: func(holder common.RootHashHolder) (d common.Trie, err error) { return dataTrie, nil }, GetStorageManagerCalled: func() common.StorageManager { @@ -605,7 +607,7 @@ func TestAccountsDB_GetExistingAccountFoundShouldRetAccount(t *testing.T) { } return nil, 0, nil }, - RecreateCalled: func(root []byte) (d common.Trie, err error) { + RecreateCalled: func(root common.RootHashHolder) (d common.Trie, err error) { return dataTrie, nil }, GetStorageManagerCalled: func() common.StorageManager { @@ -828,8 +830,8 @@ func TestAccountsDB_LoadDataWithSomeValuesShouldWork(t *testing.T) { account := generateAccount() mockTrie := &trieMock.TrieStub{ - RecreateCalled: func(root []byte) (trie common.Trie, e error) { - if !bytes.Equal(root, rootHash) { + RecreateCalled: func(root common.RootHashHolder) (trie common.Trie, e error) { + 
if !bytes.Equal(root.GetRootHash(), rootHash) { return nil, errors.New("bad root hash") } @@ -873,7 +875,7 @@ func TestAccountsDB_CommitShouldCallCommitFromTrie(t *testing.T) { GetCalled: func(_ []byte) ([]byte, uint32, error) { return serializedAccount, 0, nil }, - RecreateCalled: func(root []byte) (trie common.Trie, err error) { + RecreateCalled: func(root common.RootHashHolder) (trie common.Trie, err error) { return &trieMock.TrieStub{ GetCalled: func(_ []byte) ([]byte, uint32, error) { return []byte("doge"), 0, nil @@ -921,14 +923,14 @@ func TestAccountsDB_RecreateTrieMalfunctionTrieShouldErr(t *testing.T) { return &storageManager.StorageManagerStub{} }, } - trieStub.RecreateFromEpochCalled = func(_ common.RootHashHolder) (tree common.Trie, e error) { + trieStub.RecreateCalled = func(_ common.RootHashHolder) (tree common.Trie, e error) { wasCalled = true return nil, errExpected } adb := generateAccountDBFromTrie(trieStub) - err := adb.RecreateTrie(nil) + err := adb.RecreateTrie(holders.NewDefaultRootHashesHolder([]byte{})) assert.Equal(t, errExpected, err) assert.True(t, wasCalled) } @@ -943,13 +945,13 @@ func TestAccountsDB_RecreateTrieOutputsNilTrieShouldErr(t *testing.T) { return &storageManager.StorageManagerStub{} }, } - trieStub.RecreateFromEpochCalled = func(_ common.RootHashHolder) (tree common.Trie, e error) { + trieStub.RecreateCalled = func(_ common.RootHashHolder) (tree common.Trie, e error) { wasCalled = true return nil, nil } adb := generateAccountDBFromTrie(&trieStub) - err := adb.RecreateTrie(nil) + err := adb.RecreateTrie(holders.NewDefaultRootHashesHolder([]byte{})) assert.Equal(t, state.ErrNilTrie, err) assert.True(t, wasCalled) @@ -965,14 +967,14 @@ func TestAccountsDB_RecreateTrieOkValsShouldWork(t *testing.T) { GetStorageManagerCalled: func() common.StorageManager { return &storageManager.StorageManagerStub{} }, - RecreateFromEpochCalled: func(_ common.RootHashHolder) (common.Trie, error) { + RecreateCalled: func(_ common.RootHashHolder) (common.Trie, error) { wasCalled = true return &trieMock.TrieStub{}, nil }, } adb := generateAccountDBFromTrie(&trieStub) - err := adb.RecreateTrie(nil) + err := adb.RecreateTrie(holders.NewDefaultRootHashesHolder([]byte{})) assert.Nil(t, err) assert.True(t, wasCalled) @@ -1434,7 +1436,7 @@ func TestAccountsDB_RecreateTrieInvalidatesJournalEntries(t *testing.T) { _ = adb.SaveAccount(acc) assert.Equal(t, 5, adb.JournalLen()) - err := adb.RecreateTrie(rootHash) + err := adb.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash)) assert.Nil(t, err) assert.Equal(t, 0, adb.JournalLen()) } @@ -1953,7 +1955,7 @@ func TestAccountsDB_PruningAndPruningCancellingOnTrieRollback(t *testing.T) { } for i := 0; i < len(rootHashes); i++ { - _, err := tr.Recreate(rootHashes[i]) + _, err := tr.Recreate(holders.NewDefaultRootHashesHolder(rootHashes[i])) assert.Nil(t, err) } @@ -1962,7 +1964,7 @@ func TestAccountsDB_PruningAndPruningCancellingOnTrieRollback(t *testing.T) { finalizeTrieState(t, 2, tr, adb, rootHashes) rollbackTrieState(t, 3, tr, adb, rootHashes) - _, err := tr.Recreate(rootHashes[2]) + _, err := tr.Recreate(holders.NewDefaultRootHashesHolder(rootHashes[2])) assert.Nil(t, err) } @@ -1971,7 +1973,7 @@ func finalizeTrieState(t *testing.T, index int, tr common.Trie, adb state.Accoun adb.CancelPrune(rootHashes[index], state.NewRoot) time.Sleep(trieDbOperationDelay) - _, err := tr.Recreate(rootHashes[index-1]) + _, err := tr.Recreate(holders.NewDefaultRootHashesHolder(rootHashes[index-1])) assert.NotNil(t, err) } @@ -1980,7 +1982,7 @@ 
func rollbackTrieState(t *testing.T, index int, tr common.Trie, adb state.Accoun adb.CancelPrune(rootHashes[index-1], state.OldRoot) time.Sleep(trieDbOperationDelay) - _, err := tr.Recreate(rootHashes[index]) + _, err := tr.Recreate(holders.NewDefaultRootHashesHolder(rootHashes[index])) assert.NotNil(t, err) } @@ -2141,7 +2143,7 @@ func TestAccountsDB_RecreateAllTries(t *testing.T) { return nil }, - RecreateCalled: func(root []byte) (common.Trie, error) { + RecreateCalled: func(root common.RootHashHolder) (common.Trie, error) { return &trieMock.TrieStub{}, nil }, GetStorageManagerCalled: func() common.StorageManager { @@ -2172,7 +2174,7 @@ func TestAccountsDB_RecreateAllTries(t *testing.T) { return nil }, - RecreateCalled: func(root []byte) (common.Trie, error) { + RecreateCalled: func(root common.RootHashHolder) (common.Trie, error) { return &trieMock.TrieStub{}, nil }, GetStorageManagerCalled: func() common.StorageManager { @@ -2344,8 +2346,8 @@ func TestAccountsDB_GetAccountFromBytes(t *testing.T) { }, } args.Trie = &trieMock.TrieStub{ - RecreateCalled: func(root []byte) (common.Trie, error) { - assert.Equal(t, rootHash, root) + RecreateCalled: func(root common.RootHashHolder) (common.Trie, error) { + assert.Equal(t, rootHash, root.GetRootHash()) return &trieMock.TrieStub{}, nil }, GetStorageManagerCalled: func() common.StorageManager { @@ -2377,7 +2379,7 @@ func TestAccountsDB_GetAccountFromBytesShouldLoadDataTrie(t *testing.T) { } return nil, 0, nil }, - RecreateCalled: func(root []byte) (d common.Trie, err error) { + RecreateCalled: func(root common.RootHashHolder) (d common.Trie, err error) { return dataTrie, nil }, GetStorageManagerCalled: func() common.StorageManager { @@ -2928,7 +2930,7 @@ func testAccountMethodsConcurrency( assert.Nil(t, err) for i := 0; i < numOperations; i++ { go func(idx int) { - switch idx % 22 { + switch idx % 21 { case 0: _, _ = adb.GetExistingAccount(addresses[idx]) case 1: @@ -2952,26 +2954,24 @@ func testAccountMethodsConcurrency( case 10: _, _ = adb.RootHash() case 11: - _ = adb.RecreateTrie(rootHash) + _ = adb.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash)) case 12: - _ = adb.RecreateTrieFromEpoch(holders.NewRootHashHolder(rootHash, core.OptionalUint32{})) - case 13: adb.PruneTrie(rootHash, state.OldRoot, state.NewPruningHandler(state.DisableDataRemoval)) - case 14: + case 13: adb.CancelPrune(rootHash, state.NewRoot) - case 15: + case 14: adb.SnapshotState(rootHash, 0) - case 16: + case 15: _ = adb.IsPruningEnabled() - case 17: + case 16: _ = adb.GetAllLeaves(&common.TrieIteratorChannels{}, context.Background(), rootHash, parsers.NewMainTrieLeafParser()) - case 18: + case 17: _, _ = adb.RecreateAllTries(rootHash) - case 19: + case 18: _, _ = adb.GetTrie(rootHash) - case 20: + case 19: _ = adb.GetStackDebugFirstEntry() - case 21: + case 20: _ = adb.SetSyncer(&mock.AccountsDBSyncerStub{}) } wg.Done() @@ -2981,6 +2981,52 @@ func testAccountMethodsConcurrency( wg.Wait() } +func TestAccountsDB_MigrateDataTrieWithFunc(t *testing.T) { + t.Parallel() + + enableEpochsHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() + adb, _, _ := getDefaultStateComponents(testscommon.NewSnapshotPruningStorerMock(), enableEpochsHandler) + + addr := []byte("addr") + acc, _ := adb.LoadAccount(addr) + value := []byte("value") + _ = acc.(state.UserAccountHandler).SaveKeyValue([]byte("key"), value) + _ = acc.(state.UserAccountHandler).SaveKeyValue([]byte("key2"), value) + _ = adb.SaveAccount(acc) + + 
enableEpochsHandler.AddActiveFlags(common.AutoBalanceDataTriesFlag) + acc, _ = adb.LoadAccount(addr) + + isMigrated, err := acc.(state.AccountHandlerWithDataTrieMigrationStatus).IsDataTrieMigrated() + assert.Nil(t, err) + assert.False(t, isMigrated) + + accWithMigrate := acc.(vmcommon.UserAccountHandler).AccountDataHandler() + dataTrieMig := dataTrieMigrator.NewDataTrieMigrator(dataTrieMigrator.ArgsNewDataTrieMigrator{ + GasProvided: 100000000, + DataTrieGasCost: dataTrieMigrator.DataTrieGasCost{ + TrieLoadPerNode: 1, + TrieStorePerNode: 1, + }, + }) + err = accWithMigrate.MigrateDataTrieLeaves(vmcommon.ArgsMigrateDataTrieLeaves{ + OldVersion: core.NotSpecified, + NewVersion: core.AutoBalanceEnabled, + TrieMigrator: dataTrieMig, + }) + assert.Nil(t, err) + _ = adb.SaveAccount(acc) + + acc, _ = adb.LoadAccount(addr) + retrievedVal, _, err := acc.(state.UserAccountHandler).RetrieveValue([]byte("key")) + assert.Equal(t, value, retrievedVal) + assert.Nil(t, err) + + isMigrated, err = acc.(state.AccountHandlerWithDataTrieMigrationStatus).IsDataTrieMigrated() + assert.Nil(t, err) + assert.True(t, isMigrated) +} + func BenchmarkAccountsDB_GetMethodsInParallel(b *testing.B) { _, adb := getDefaultTrieAndAccountsDb() @@ -3015,7 +3061,7 @@ func testAccountLoadInParallel( case 1: _, _ = adb.GetExistingAccount(addresses[idx]) case 2: - _ = adb.RecreateTrie(rootHash) + _ = adb.RecreateTrie(holders.NewDefaultRootHashesHolder(rootHash)) } }(i) } diff --git a/state/errors.go b/state/errors.go index fd8c0057241..168dc098b98 100644 --- a/state/errors.go +++ b/state/errors.go @@ -145,6 +145,9 @@ var ErrNilStateMetrics = errors.New("nil sstate metrics") // ErrNilChannelsProvider signals that a nil channels provider has been given var ErrNilChannelsProvider = errors.New("nil channels provider") +// ErrNilRootHashHolder signals that a nil root hash holder was provided +var ErrNilRootHashHolder = errors.New("nil root hash holder provided") + // ErrNilStatsHandler signals that a nil stats handler provider has been given var ErrNilStatsHandler = errors.New("nil stats handler") @@ -153,3 +156,12 @@ var ErrNilLastSnapshotMarker = errors.New("nil last snapshot marker") // ErrNilSnapshotsManager signals that a nil snapshots manager has been given var ErrNilSnapshotsManager = errors.New("nil snapshots manager") + +// ErrNilValidatorInfo signals that a nil value for the validator info has been provided +var ErrNilValidatorInfo = errors.New("validator info is nil") + +// ErrValidatorsDifferentShards signals that validators are not in the same shard +var ErrValidatorsDifferentShards = errors.New("validators are not in the same shard") + +// ErrValidatorNotFound signals that a validator was not found +var ErrValidatorNotFound = errors.New("validator not found") diff --git a/state/export_test.go b/state/export_test.go index 0045adc880c..4398d616dd3 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -90,3 +90,9 @@ func (sm *snapshotsManager) GetLastSnapshotInfo() ([]byte, uint32) { func NewNilSnapshotsManager() *snapshotsManager { return nil } + +// AccountHandlerWithDataTrieMigrationStatus - +type AccountHandlerWithDataTrieMigrationStatus interface { + vmcommon.AccountHandler + IsDataTrieMigrated() (bool, error) +} diff --git a/state/interface.go b/state/interface.go index 06050f95fcb..a9b460957b1 100644 --- a/state/interface.go +++ b/state/interface.go @@ -6,8 +6,9 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/api" - 
"github.com/multiversx/mx-chain-go/common" vmcommon "github.com/multiversx/mx-chain-vm-common-go" + + "github.com/multiversx/mx-chain-go/common" ) // AccountFactory creates an account of different types @@ -23,6 +24,47 @@ type Updater interface { IsInterfaceNil() bool } +// PeerAccountHandler models a peer state account, which can journalize a normal account's data +// with some extra features like signing statistics or rating information +type PeerAccountHandler interface { + GetBLSPublicKey() []byte + SetBLSPublicKey([]byte) error + GetRewardAddress() []byte + SetRewardAddress([]byte) error + GetAccumulatedFees() *big.Int + AddToAccumulatedFees(*big.Int) + GetList() string + GetPreviousList() string + GetIndexInList() uint32 + GetPreviousIndexInList() uint32 + GetShardId() uint32 + SetUnStakedEpoch(epoch uint32) + GetUnStakedEpoch() uint32 + IncreaseLeaderSuccessRate(uint32) + DecreaseLeaderSuccessRate(uint32) + IncreaseValidatorSuccessRate(uint32) + DecreaseValidatorSuccessRate(uint32) + IncreaseValidatorIgnoredSignaturesRate(uint32) + GetNumSelectedInSuccessBlocks() uint32 + IncreaseNumSelectedInSuccessBlocks() + GetLeaderSuccessRate() SignRate + GetValidatorSuccessRate() SignRate + GetValidatorIgnoredSignaturesRate() uint32 + GetTotalLeaderSuccessRate() SignRate + GetTotalValidatorSuccessRate() SignRate + GetTotalValidatorIgnoredSignaturesRate() uint32 + SetListAndIndex(shardID uint32, list string, index uint32, updatePreviousValues bool) + GetRating() uint32 + SetRating(uint32) + GetTempRating() uint32 + SetTempRating(uint32) + GetConsecutiveProposerMisses() uint32 + SetConsecutiveProposerMisses(consecutiveMisses uint32) + ResetAtNewEpoch() + SetPreviousList(list string) + vmcommon.AccountHandler +} + // AccountsAdapter is used for the structure that manages the accounts on top of a trie.PatriciaMerkleTrie // implementation type AccountsAdapter interface { @@ -37,8 +79,7 @@ type AccountsAdapter interface { RevertToSnapshot(snapshot int) error GetCode(codeHash []byte) []byte RootHash() ([]byte, error) - RecreateTrie(rootHash []byte) error - RecreateTrieFromEpoch(options common.RootHashHolder) error + RecreateTrie(options common.RootHashHolder) error PruneTrie(rootHash []byte, identifier TriePruningIdentifier, handler PruningHandler) CancelPrune(rootHash []byte, identifier TriePruningIdentifier) SnapshotState(rootHash []byte, epoch uint32) @@ -180,43 +221,6 @@ type DataTrie interface { CollectLeavesForMigration(args vmcommon.ArgsMigrateDataTrieLeaves) error } -// PeerAccountHandler models a peer state account, which can journalize a normal account's data -// with some extra features like signing statistics or rating information -type PeerAccountHandler interface { - SetBLSPublicKey([]byte) error - GetRewardAddress() []byte - SetRewardAddress([]byte) error - GetAccumulatedFees() *big.Int - AddToAccumulatedFees(*big.Int) - GetList() string - GetIndexInList() uint32 - GetShardId() uint32 - SetUnStakedEpoch(epoch uint32) - GetUnStakedEpoch() uint32 - IncreaseLeaderSuccessRate(uint32) - DecreaseLeaderSuccessRate(uint32) - IncreaseValidatorSuccessRate(uint32) - DecreaseValidatorSuccessRate(uint32) - IncreaseValidatorIgnoredSignaturesRate(uint32) - GetNumSelectedInSuccessBlocks() uint32 - IncreaseNumSelectedInSuccessBlocks() - GetLeaderSuccessRate() SignRate - GetValidatorSuccessRate() SignRate - GetValidatorIgnoredSignaturesRate() uint32 - GetTotalLeaderSuccessRate() SignRate - GetTotalValidatorSuccessRate() SignRate - 
GetTotalValidatorIgnoredSignaturesRate() uint32 - SetListAndIndex(shardID uint32, list string, index uint32) - GetRating() uint32 - SetRating(uint32) - GetTempRating() uint32 - SetTempRating(uint32) - GetConsecutiveProposerMisses() uint32 - SetConsecutiveProposerMisses(uint322 uint32) - ResetAtNewEpoch() - vmcommon.AccountHandler -} - // UserAccountHandler models a user account, which can journalize account's data with some extra features // like balance, developer rewards, owner type UserAccountHandler interface { @@ -278,3 +282,74 @@ type LastSnapshotMarker interface { GetMarkerInfo(trieStorageManager common.StorageManager) ([]byte, error) IsInterfaceNil() bool } + +// ShardValidatorsInfoMapHandler shall be used to manage operations inside +// a map in a concurrent-safe way. +type ShardValidatorsInfoMapHandler interface { + GetShardValidatorsInfoMap() map[uint32][]ValidatorInfoHandler + GetAllValidatorsInfo() []ValidatorInfoHandler + GetValidator(blsKey []byte) ValidatorInfoHandler + + Add(validator ValidatorInfoHandler) error + Delete(validator ValidatorInfoHandler) error + DeleteByKey(blsKey []byte, shardID uint32) + Replace(old ValidatorInfoHandler, new ValidatorInfoHandler) error + ReplaceValidatorByKey(oldBlsKey []byte, new ValidatorInfoHandler, shardID uint32) bool + SetValidatorsInShard(shardID uint32, validators []ValidatorInfoHandler) error + SetValidatorsInShardUnsafe(shardID uint32, validators []ValidatorInfoHandler) +} + +// ValidatorInfoHandler defines which data shall a validator info hold. +type ValidatorInfoHandler interface { + IsInterfaceNil() bool + + GetPublicKey() []byte + GetShardId() uint32 + GetList() string + GetIndex() uint32 + GetPreviousIndex() uint32 + GetTempRating() uint32 + GetRating() uint32 + GetRatingModifier() float32 + GetRewardAddress() []byte + GetLeaderSuccess() uint32 + GetLeaderFailure() uint32 + GetValidatorSuccess() uint32 + GetValidatorFailure() uint32 + GetValidatorIgnoredSignatures() uint32 + GetNumSelectedInSuccessBlocks() uint32 + GetAccumulatedFees() *big.Int + GetTotalLeaderSuccess() uint32 + GetTotalLeaderFailure() uint32 + GetTotalValidatorSuccess() uint32 + GetTotalValidatorFailure() uint32 + GetTotalValidatorIgnoredSignatures() uint32 + GetPreviousList() string + + SetPublicKey(publicKey []byte) + SetShardId(shardID uint32) + SetPreviousList(list string) + SetList(list string) + SetIndex(index uint32) + SetListAndIndex(list string, index uint32, updatePreviousValues bool) + SetTempRating(tempRating uint32) + SetRating(rating uint32) + SetRatingModifier(ratingModifier float32) + SetRewardAddress(rewardAddress []byte) + SetLeaderSuccess(leaderSuccess uint32) + SetLeaderFailure(leaderFailure uint32) + SetValidatorSuccess(validatorSuccess uint32) + SetValidatorFailure(validatorFailure uint32) + SetValidatorIgnoredSignatures(validatorIgnoredSignatures uint32) + SetNumSelectedInSuccessBlocks(numSelectedInSuccessBlock uint32) + SetAccumulatedFees(accumulatedFees *big.Int) + SetTotalLeaderSuccess(totalLeaderSuccess uint32) + SetTotalLeaderFailure(totalLeaderFailure uint32) + SetTotalValidatorSuccess(totalValidatorSuccess uint32) + SetTotalValidatorFailure(totalValidatorFailure uint32) + SetTotalValidatorIgnoredSignatures(totalValidatorIgnoredSignatures uint32) + + ShallowClone() ValidatorInfoHandler + String() string + GoString() string +} diff --git a/state/peerAccountsDB_test.go b/state/peerAccountsDB_test.go index 2165357c7ec..3e1f2322c24 100644 --- a/state/peerAccountsDB_test.go +++ b/state/peerAccountsDB_test.go @@ -163,7 +163,7 @@ func 
TestNewPeerAccountsDB_RecreateAllTries(t *testing.T) { GetStorageManagerCalled: func() common.StorageManager { return &storageManager.StorageManagerStub{} }, - RecreateCalled: func(_ []byte) (common.Trie, error) { + RecreateCalled: func(_ common.RootHashHolder) (common.Trie, error) { recreateCalled = true return nil, nil }, diff --git a/state/storagePruningManager/storagePruningManager_test.go b/state/storagePruningManager/storagePruningManager_test.go index d195d4ef5c9..25bb2860b84 100644 --- a/state/storagePruningManager/storagePruningManager_test.go +++ b/state/storagePruningManager/storagePruningManager_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/state" @@ -223,7 +224,7 @@ func TestAccountsDB_PruneAfterCancelPruneShouldFail(t *testing.T) { spm.CancelPrune(rootHash, state.OldRoot, trieStorage) spm.PruneTrie(rootHash, state.OldRoot, trieStorage, state.NewPruningHandler(state.EnableDataRemoval)) - newTr, err := tr.Recreate(rootHash) + newTr, err := tr.Recreate(holders.NewDefaultRootHashesHolder(rootHash)) assert.Nil(t, err) assert.NotNil(t, newTr) } diff --git a/state/syncer/baseAccountsSyncer.go b/state/syncer/baseAccountsSyncer.go index e6bf39f45b2..3cee93d7325 100644 --- a/state/syncer/baseAccountsSyncer.go +++ b/state/syncer/baseAccountsSyncer.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/trie" @@ -225,7 +226,8 @@ func (b *baseAccountsSyncer) GetSyncedTries() map[string]common.Trie { var recreatedTrie common.Trie clonedMap := make(map[string]common.Trie, len(b.dataTries)) for key := range b.dataTries { - recreatedTrie, err = dataTrie.Recreate([]byte(key)) + rootHashHolder := holders.NewDefaultRootHashesHolder([]byte(key)) + recreatedTrie, err = dataTrie.Recreate(rootHashHolder) if err != nil { log.Warn("error recreating trie in baseAccountsSyncer.GetSyncedTries", "roothash", []byte(key), "error", err) diff --git a/state/trackableDataTrie/trackableDataTrie.go b/state/trackableDataTrie/trackableDataTrie.go index e7c874e7dbf..5808e3833e2 100644 --- a/state/trackableDataTrie/trackableDataTrie.go +++ b/state/trackableDataTrie/trackableDataTrie.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/holders" errorsCommon "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/dataTrieValue" @@ -133,8 +134,13 @@ func (tdt *trackableDataTrie) MigrateDataTrieLeaves(args vmcommon.ArgsMigrateDat dataToBeMigrated := args.TrieMigrator.GetLeavesToBeMigrated() log.Debug("num leaves to be migrated", "num", len(dataToBeMigrated), "account", 
tdt.identifier) for _, leafData := range dataToBeMigrated { + val, err := tdt.getValueWithoutMetadata(leafData.Key, leafData) + if err != nil { + return err + } + dataEntry := dirtyData{ - value: leafData.Value, + value: val, newVersion: args.NewVersion, } @@ -209,7 +215,8 @@ func (tdt *trackableDataTrie) SaveDirtyData(mainTrie common.Trie) ([]core.TrieDa } if check.IfNil(tdt.tr) { - newDataTrie, err := mainTrie.Recreate(make([]byte, 0)) + emptyRootHash := holders.NewDefaultRootHashesHolder(make([]byte, 0)) + newDataTrie, err := mainTrie.Recreate(emptyRootHash) if err != nil { return nil, err } diff --git a/state/trackableDataTrie/trackableDataTrie_test.go b/state/trackableDataTrie/trackableDataTrie_test.go index e5aca45a0ad..fb7b47182df 100644 --- a/state/trackableDataTrie/trackableDataTrie_test.go +++ b/state/trackableDataTrie/trackableDataTrie_test.go @@ -367,7 +367,7 @@ func TestTrackableDataTrie_SaveDirtyData(t *testing.T) { recreateCalled := false trie := &trieMock.TrieStub{ - RecreateCalled: func(root []byte) (common.Trie, error) { + RecreateCalled: func(root common.RootHashHolder) (common.Trie, error) { recreateCalled = true return &trieMock.TrieStub{ GetCalled: func(_ []byte) ([]byte, uint32, error) { @@ -863,20 +863,22 @@ func TestTrackableDataTrie_MigrateDataTrieLeaves(t *testing.T) { t.Run("leaves that need to be migrated are added to dirty data", func(t *testing.T) { t.Parallel() + expectedValues := [][]byte{[]byte("value1"), []byte("value2"), []byte("value3")} + address := []byte("identifier") leavesToBeMigrated := []core.TrieData{ { Key: []byte("key1"), - Value: []byte("value1"), + Value: append([]byte("value1key1"), address...), Version: core.NotSpecified, }, { Key: []byte("key2"), - Value: []byte("value2"), + Value: append([]byte("value2key2"), address...), Version: core.NotSpecified, }, { Key: []byte("key3"), - Value: []byte("value3"), + Value: append([]byte("value3key3"), address...), Version: core.NotSpecified, }, } @@ -896,7 +898,7 @@ func TestTrackableDataTrie_MigrateDataTrieLeaves(t *testing.T) { }, } - tdt, _ := trackableDataTrie.NewTrackableDataTrie([]byte("identifier"), &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpchs) + tdt, _ := trackableDataTrie.NewTrackableDataTrie(address, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpchs) tdt.SetDataTrie(tr) args := vmcommon.ArgsMigrateDataTrieLeaves{ OldVersion: core.NotSpecified, @@ -910,7 +912,7 @@ func TestTrackableDataTrie_MigrateDataTrieLeaves(t *testing.T) { assert.Equal(t, len(leavesToBeMigrated), len(dirtyData)) for i := range leavesToBeMigrated { d := dirtyData[string(leavesToBeMigrated[i].Key)] - assert.Equal(t, leavesToBeMigrated[i].Value, d.Value) + assert.Equal(t, expectedValues[i], d.Value) assert.Equal(t, core.TrieNodeVersion(100), d.NewVersion) } }) diff --git a/state/validatorInfo.go b/state/validatorInfo.go index 2ca0cf416e0..c6ea6d06001 100644 --- a/state/validatorInfo.go +++ b/state/validatorInfo.go @@ -2,11 +2,138 @@ package state +import mathbig "math/big" + // IsInterfaceNil returns true if there is no value under the interface func (vi *ValidatorInfo) IsInterfaceNil() bool { return vi == nil } +// SetPublicKey sets validator's public key +func (vi *ValidatorInfo) SetPublicKey(publicKey []byte) { + vi.PublicKey = publicKey +} + +// SetList sets validator's list +func (vi *ValidatorInfo) SetList(list string) { + vi.List = list +} + +// SetPreviousList sets validator's previous list +func (vi *ValidatorInfo) SetPreviousList(list string) { + 
vi.PreviousList = list +} + +func (vi *ValidatorInfo) SetListAndIndex(list string, index uint32, updatePreviousValues bool) { + if updatePreviousValues { + vi.PreviousIndex = vi.Index + vi.PreviousList = vi.List + } + + vi.List = list + vi.Index = index +} + +// SetShardId sets validator's public shard id +func (vi *ValidatorInfo) SetShardId(shardID uint32) { + vi.ShardId = shardID +} + +// SetIndex sets validator's index +func (vi *ValidatorInfo) SetIndex(index uint32) { + vi.Index = index +} + +// SetTempRating sets validator's temp rating +func (vi *ValidatorInfo) SetTempRating(tempRating uint32) { + vi.TempRating = tempRating +} + +// SetRating sets validator's rating +func (vi *ValidatorInfo) SetRating(rating uint32) { + vi.Rating = rating +} + +// SetRatingModifier sets validator's rating modifier +func (vi *ValidatorInfo) SetRatingModifier(ratingModifier float32) { + vi.RatingModifier = ratingModifier +} + +// SetRewardAddress sets validator's reward address +func (vi *ValidatorInfo) SetRewardAddress(rewardAddress []byte) { + vi.RewardAddress = rewardAddress +} + +// SetLeaderSuccess sets leader success +func (vi *ValidatorInfo) SetLeaderSuccess(leaderSuccess uint32) { + vi.LeaderSuccess = leaderSuccess +} + +// SetLeaderFailure sets validator's leader failure +func (vi *ValidatorInfo) SetLeaderFailure(leaderFailure uint32) { + vi.LeaderFailure = leaderFailure +} + +// SetValidatorSuccess sets validator's success +func (vi *ValidatorInfo) SetValidatorSuccess(validatorSuccess uint32) { + vi.ValidatorSuccess = validatorSuccess +} + +// SetValidatorFailure sets validator's failure +func (vi *ValidatorInfo) SetValidatorFailure(validatorFailure uint32) { + vi.ValidatorFailure = validatorFailure +} + +// SetValidatorIgnoredSignatures sets validator's ignored signatures +func (vi *ValidatorInfo) SetValidatorIgnoredSignatures(validatorIgnoredSignatures uint32) { + vi.ValidatorIgnoredSignatures = validatorIgnoredSignatures +} + +// SetNumSelectedInSuccessBlocks sets validator's num of selected in success block +func (vi *ValidatorInfo) SetNumSelectedInSuccessBlocks(numSelectedInSuccessBlock uint32) { + vi.NumSelectedInSuccessBlocks = numSelectedInSuccessBlock +} + +// SetAccumulatedFees sets validator's accumulated fees +func (vi *ValidatorInfo) SetAccumulatedFees(accumulatedFees *mathbig.Int) { + vi.AccumulatedFees = mathbig.NewInt(0).Set(accumulatedFees) +} + +// SetTotalLeaderSuccess sets validator's total leader success +func (vi *ValidatorInfo) SetTotalLeaderSuccess(totalLeaderSuccess uint32) { + vi.TotalLeaderSuccess = totalLeaderSuccess +} + +// SetTotalLeaderFailure sets validator's total leader failure +func (vi *ValidatorInfo) SetTotalLeaderFailure(totalLeaderFailure uint32) { + vi.TotalLeaderFailure = totalLeaderFailure +} + +// SetTotalValidatorSuccess sets validator's total success +func (vi *ValidatorInfo) SetTotalValidatorSuccess(totalValidatorSuccess uint32) { + vi.TotalValidatorSuccess = totalValidatorSuccess +} + +// SetTotalValidatorFailure sets validator's total failure +func (vi *ValidatorInfo) SetTotalValidatorFailure(totalValidatorFailure uint32) { + vi.TotalValidatorFailure = totalValidatorFailure +} + +// SetTotalValidatorIgnoredSignatures sets validator's total ignored signatures +func (vi *ValidatorInfo) SetTotalValidatorIgnoredSignatures(totalValidatorIgnoredSignatures uint32) { + vi.TotalValidatorIgnoredSignatures = totalValidatorIgnoredSignatures +} + +// ShallowClone returns a clone of the object +func (vi *ValidatorInfo) ShallowClone() ValidatorInfoHandler { + 
if vi == nil { + return nil + } + + validatorCopy := *vi + return &validatorCopy +} + // IsInterfaceNil returns true if there is no value under the interface func (svi *ShardValidatorInfo) IsInterfaceNil() bool { return svi == nil diff --git a/state/validatorInfo.pb.go b/state/validatorInfo.pb.go index 19907c86869..3261e3da880 100644 --- a/state/validatorInfo.pb.go +++ b/state/validatorInfo.pb.go @@ -51,6 +51,8 @@ type ValidatorInfo struct { TotalValidatorSuccess uint32 `protobuf:"varint,18,opt,name=TotalValidatorSuccess,proto3" json:"totalValidatorSuccess"` TotalValidatorFailure uint32 `protobuf:"varint,19,opt,name=TotalValidatorFailure,proto3" json:"totalValidatorFailure"` TotalValidatorIgnoredSignatures uint32 `protobuf:"varint,20,opt,name=TotalValidatorIgnoredSignatures,proto3" json:"totalValidatorIgnoredSignatures"` + PreviousList string `protobuf:"bytes,21,opt,name=PreviousList,proto3" json:"previousList,omitempty"` + PreviousIndex uint32 `protobuf:"varint,22,opt,name=PreviousIndex,proto3" json:"previousIndex,omitempty"` } func (m *ValidatorInfo) Reset() { *m = ValidatorInfo{} } @@ -221,13 +223,29 @@ func (m *ValidatorInfo) GetTotalValidatorIgnoredSignatures() uint32 { return 0 } +func (m *ValidatorInfo) GetPreviousList() string { + if m != nil { + return m.PreviousList + } + return "" +} + +func (m *ValidatorInfo) GetPreviousIndex() uint32 { + if m != nil { + return m.PreviousIndex + } + return 0 +} + // ShardValidatorInfo represents the data regarding a validator that is stored in the PeerMiniblocks type ShardValidatorInfo struct { - PublicKey []byte `protobuf:"bytes,1,opt,name=PublicKey,proto3" json:"publicKey"` - ShardId uint32 `protobuf:"varint,2,opt,name=ShardId,proto3" json:"shardId"` - List string `protobuf:"bytes,3,opt,name=List,proto3" json:"list,omitempty"` - Index uint32 `protobuf:"varint,4,opt,name=Index,proto3" json:"index"` - TempRating uint32 `protobuf:"varint,5,opt,name=TempRating,proto3" json:"tempRating"` + PublicKey []byte `protobuf:"bytes,1,opt,name=PublicKey,proto3" json:"publicKey"` + ShardId uint32 `protobuf:"varint,2,opt,name=ShardId,proto3" json:"shardId"` + List string `protobuf:"bytes,3,opt,name=List,proto3" json:"list,omitempty"` + Index uint32 `protobuf:"varint,4,opt,name=Index,proto3" json:"index"` + TempRating uint32 `protobuf:"varint,5,opt,name=TempRating,proto3" json:"tempRating"` + PreviousList string `protobuf:"bytes,6,opt,name=PreviousList,proto3" json:"previousList,omitempty"` + PreviousIndex uint32 `protobuf:"varint,7,opt,name=PreviousIndex,proto3" json:"previousIndex,omitempty"` } func (m *ShardValidatorInfo) Reset() { *m = ShardValidatorInfo{} } @@ -293,6 +311,20 @@ func (m *ShardValidatorInfo) GetTempRating() uint32 { return 0 } +func (m *ShardValidatorInfo) GetPreviousList() string { + if m != nil { + return m.PreviousList + } + return "" +} + +func (m *ShardValidatorInfo) GetPreviousIndex() uint32 { + if m != nil { + return m.PreviousIndex + } + return 0 +} + func init() { proto.RegisterType((*ValidatorInfo)(nil), "proto.ValidatorInfo") proto.RegisterType((*ShardValidatorInfo)(nil), "proto.ShardValidatorInfo") @@ -301,52 +333,56 @@ func init() { func init() { proto.RegisterFile("validatorInfo.proto", fileDescriptor_bf9cdc082f0b2ec2) } var fileDescriptor_bf9cdc082f0b2ec2 = []byte{ - // 714 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0x4f, 0x4f, 0x13, 0x41, - 0x18, 0xc6, 0xbb, 0x48, 0x0b, 0x1d, 0x68, 0x81, 0x01, 0x74, 0x41, 0xb3, 0xd3, 0x60, 0x34, 0x4d, - 0xb4, 0xed, 0xc1, 
0x83, 0x89, 0x1e, 0x94, 0x1a, 0x49, 0x1a, 0xf1, 0x4f, 0xa6, 0xc4, 0x83, 0x07, - 0x93, 0xe9, 0xee, 0x74, 0x3b, 0x71, 0xff, 0x90, 0xd9, 0xd9, 0x0a, 0x37, 0x3f, 0x02, 0x1f, 0xc3, - 0xf8, 0x49, 0x3c, 0x72, 0xe4, 0xb4, 0xd8, 0xe5, 0x62, 0xe6, 0xc4, 0x47, 0x30, 0x9d, 0x76, 0x69, - 0xb7, 0x2d, 0x78, 0xe2, 0xc4, 0xee, 0xfb, 0x3c, 0xcf, 0x6f, 0x5e, 0xfa, 0x4e, 0xdf, 0x82, 0xf5, - 0x2e, 0x71, 0x98, 0x45, 0x84, 0xcf, 0x1b, 0x5e, 0xdb, 0xaf, 0x1e, 0x72, 0x5f, 0xf8, 0x30, 0xab, - 0xfe, 0x6c, 0x57, 0x6c, 0x26, 0x3a, 0x61, 0xab, 0x6a, 0xfa, 0x6e, 0xcd, 0xf6, 0x6d, 0xbf, 0xa6, - 0xca, 0xad, 0xb0, 0xad, 0xde, 0xd4, 0x8b, 0x7a, 0x1a, 0xa4, 0x76, 0xce, 0x01, 0x28, 0x7c, 0x1e, - 0xa7, 0xc1, 0x27, 0x20, 0xff, 0x29, 0x6c, 0x39, 0xcc, 0x7c, 0x47, 0x8f, 0x75, 0xad, 0xa4, 0x95, - 0x97, 0xeb, 0x05, 0x19, 0xa1, 0xfc, 0x61, 0x52, 0xc4, 0x23, 0x1d, 0x3e, 0x02, 0x0b, 0xcd, 0x0e, - 0xe1, 0x56, 0xc3, 0xd2, 0xe7, 0x4a, 0x5a, 0xb9, 0x50, 0x5f, 0x92, 0x11, 0x5a, 0x08, 0x06, 0x25, - 0x9c, 0x68, 0xf0, 0x01, 0x98, 0xdf, 0x67, 0x81, 0xd0, 0xef, 0x94, 0xb4, 0x72, 0xbe, 0xbe, 0x28, - 0x23, 0x34, 0xef, 0xb0, 0x40, 0x60, 0x55, 0x85, 0x08, 0x64, 0x1b, 0x9e, 0x45, 0x8f, 0xf4, 0x79, - 0x85, 0xc8, 0xcb, 0x08, 0x65, 0x59, 0xbf, 0x80, 0x07, 0x75, 0x58, 0x05, 0xe0, 0x80, 0xba, 0x87, - 0x98, 0x08, 0xe6, 0xd9, 0x7a, 0x56, 0xb9, 0x8a, 0x32, 0x42, 0x40, 0x5c, 0x55, 0xf1, 0x98, 0x03, - 0xee, 0x80, 0xdc, 0xd0, 0x9b, 0x53, 0x5e, 0x20, 0x23, 0x94, 0xe3, 0x03, 0xdf, 0x50, 0x81, 0x2f, - 0x40, 0x71, 0xf0, 0xf4, 0xde, 0xb7, 0x58, 0x9b, 0x51, 0xae, 0x2f, 0x94, 0xb4, 0xf2, 0x5c, 0x1d, - 0xca, 0x08, 0x15, 0x79, 0x4a, 0xc1, 0x13, 0x4e, 0xb8, 0x0b, 0x0a, 0x98, 0x7e, 0x27, 0xdc, 0xda, - 0xb5, 0x2c, 0x4e, 0x83, 0x40, 0x5f, 0x54, 0x1f, 0xd3, 0x7d, 0x19, 0xa1, 0x7b, 0x7c, 0x5c, 0x78, - 0xea, 0xbb, 0xac, 0xdf, 0xa3, 0x38, 0xc6, 0xe9, 0x04, 0x7c, 0x0e, 0x0a, 0xfb, 0x94, 0x58, 0x94, - 0x37, 0x43, 0xd3, 0xec, 0x23, 0xf2, 0xaa, 0xd3, 0x35, 0x19, 0xa1, 0x82, 0x33, 0x2e, 0xe0, 0xb4, - 0x6f, 0x14, 0xdc, 0x23, 0xcc, 0x09, 0x39, 0xd5, 0xc1, 0x64, 0x70, 0x28, 0xe0, 0xb4, 0x0f, 0xbe, - 0x06, 0xab, 0x57, 0x83, 0x4e, 0x0e, 0x5d, 0x52, 0xd9, 0x0d, 0x19, 0xa1, 0xd5, 0xee, 0x84, 0x86, - 0xa7, 0xdc, 0x29, 0x42, 0x72, 0xfa, 0xf2, 0x0c, 0x42, 0xd2, 0xc0, 0x94, 0x1b, 0x7e, 0x05, 0xdb, - 0xa3, 0xcb, 0x66, 0x7b, 0x3e, 0xa7, 0x56, 0x93, 0xd9, 0x1e, 0x11, 0x21, 0xa7, 0x81, 0x5e, 0x50, - 0x2c, 0x43, 0x46, 0x68, 0xbb, 0x7b, 0xad, 0x0b, 0xdf, 0x40, 0xe8, 0xf3, 0x3f, 0x84, 0x6e, 0x93, - 0x3a, 0xd4, 0x14, 0xd4, 0x6a, 0x78, 0xc3, 0xce, 0xeb, 0x8e, 0x6f, 0x7e, 0x0b, 0xf4, 0xe2, 0x88, - 0xef, 0x5d, 0xeb, 0xc2, 0x37, 0x10, 0xe0, 0x89, 0x06, 0x56, 0x76, 0x4d, 0x33, 0x74, 0x43, 0x87, - 0x08, 0x6a, 0xed, 0x51, 0x1a, 0xe8, 0x2b, 0x6a, 0xf6, 0x6d, 0x19, 0xa1, 0x2d, 0x92, 0x96, 0x46, - 0xd3, 0xff, 0x75, 0x8e, 0xde, 0xba, 0x44, 0x74, 0x6a, 0x2d, 0x66, 0x57, 0x1b, 0x9e, 0x78, 0x39, - 0xf6, 0x25, 0x75, 0x43, 0x47, 0xb0, 0x2e, 0xe5, 0xc1, 0x51, 0xcd, 0x3d, 0xaa, 0x98, 0x1d, 0xc2, - 0xbc, 0x8a, 0xe9, 0x73, 0x5a, 0xb1, 0xfd, 0x9a, 0x45, 0x04, 0xa9, 0xd6, 0x99, 0xdd, 0xf0, 0xc4, - 0x1b, 0x12, 0x08, 0xca, 0xf1, 0xe4, 0xf1, 0x70, 0x0f, 0xc0, 0x03, 0x5f, 0x10, 0x27, 0x7d, 0x9b, - 0x56, 0xd5, 0xbf, 0x7a, 0x57, 0x46, 0x08, 0x8a, 0x29, 0x15, 0xcf, 0x48, 0x4c, 0x70, 0x92, 0xf1, - 0xae, 0xcd, 0xe4, 0x24, 0x03, 0x9e, 0x91, 0x80, 0x1f, 0xc1, 0xa6, 0xaa, 0x4e, 0xdd, 0x35, 0xa8, - 0x50, 0x5b, 0x32, 0x42, 0x9b, 0x62, 0x96, 0x01, 0xcf, 0xce, 0x4d, 0x03, 0x93, 0xde, 0xd6, 0xaf, - 0x03, 0x26, 0xed, 0xcd, 0xce, 0x41, 0x17, 0xa0, 0xb4, 0x30, 0x7d, 0x13, 0x37, 0x14, 0xfa, 0xa1, - 0x8c, 0x10, 0x12, 0x37, 0x5b, 0xf1, 0xff, 
0x58, 0x3b, 0x3d, 0x0d, 0x40, 0xb5, 0x07, 0x6f, 0x7f, - 0xcd, 0x3e, 0x4e, 0xad, 0x59, 0xb5, 0xc9, 0xfa, 0x6b, 0x76, 0x6c, 0x0b, 0xdd, 0xce, 0xc2, 0xad, - 0xbf, 0x3a, 0xed, 0x19, 0x99, 0xb3, 0x9e, 0x91, 0xb9, 0xec, 0x19, 0xda, 0x8f, 0xd8, 0xd0, 0x7e, - 0xc6, 0x86, 0xf6, 0x3b, 0x36, 0xb4, 0xd3, 0xd8, 0xd0, 0xce, 0x62, 0x43, 0xfb, 0x13, 0x1b, 0xda, - 0xdf, 0xd8, 0xc8, 0x5c, 0xc6, 0x86, 0x76, 0x72, 0x61, 0x64, 0x4e, 0x2f, 0x8c, 0xcc, 0xd9, 0x85, - 0x91, 0xf9, 0x92, 0x0d, 0x04, 0x11, 0xb4, 0x95, 0x53, 0xbf, 0x46, 0xcf, 0xfe, 0x05, 0x00, 0x00, - 0xff, 0xff, 0x5e, 0xa1, 0xc3, 0x5e, 0xda, 0x06, 0x00, 0x00, + // 770 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xf3, 0x34, + 0x1c, 0x6f, 0xc6, 0xda, 0x3e, 0xf5, 0xd6, 0x3e, 0x9b, 0xf7, 0x42, 0x56, 0x50, 0x5c, 0x0d, 0x81, + 0x2a, 0x41, 0xdb, 0x03, 0x07, 0x24, 0x90, 0x80, 0x15, 0x31, 0xa9, 0x62, 0xc0, 0xe4, 0x4e, 0x1c, + 0x38, 0x20, 0xb9, 0x89, 0x9b, 0x5a, 0xe4, 0xa5, 0x72, 0x9c, 0xb2, 0xdd, 0xf8, 0x08, 0xfb, 0x18, + 0x88, 0x4f, 0xc2, 0x71, 0xc7, 0x9d, 0x0c, 0xcb, 0x38, 0x20, 0x9f, 0xf6, 0x11, 0x50, 0xdd, 0x66, + 0x4d, 0xda, 0x6e, 0x08, 0x3d, 0xda, 0xa9, 0xf1, 0xff, 0xf7, 0xe2, 0x7f, 0xfc, 0x77, 0x7f, 0x01, + 0x7b, 0x13, 0xe2, 0x31, 0x87, 0x88, 0x90, 0xf7, 0x82, 0x61, 0xd8, 0x1e, 0xf3, 0x50, 0x84, 0xb0, + 0xa8, 0x7f, 0xea, 0x2d, 0x97, 0x89, 0x51, 0x3c, 0x68, 0xdb, 0xa1, 0xdf, 0x71, 0x43, 0x37, 0xec, + 0xe8, 0xf2, 0x20, 0x1e, 0xea, 0x95, 0x5e, 0xe8, 0xa7, 0x99, 0xea, 0x38, 0xd9, 0x02, 0xd5, 0x1f, + 0xb2, 0x6e, 0xf0, 0x43, 0x50, 0x39, 0x8f, 0x07, 0x1e, 0xb3, 0xbf, 0xa1, 0x57, 0xa6, 0xd1, 0x30, + 0x9a, 0xdb, 0xdd, 0xaa, 0x92, 0xa8, 0x32, 0x4e, 0x8b, 0x78, 0x81, 0xc3, 0xf7, 0x41, 0xb9, 0x3f, + 0x22, 0xdc, 0xe9, 0x39, 0xe6, 0x46, 0xc3, 0x68, 0x56, 0xbb, 0x5b, 0x4a, 0xa2, 0x72, 0x34, 0x2b, + 0xe1, 0x14, 0x83, 0xef, 0x82, 0xcd, 0x33, 0x16, 0x09, 0xf3, 0xad, 0x86, 0xd1, 0xac, 0x74, 0x5f, + 0x29, 0x89, 0x36, 0x3d, 0x16, 0x09, 0xac, 0xab, 0x10, 0x81, 0x62, 0x2f, 0x70, 0xe8, 0xa5, 0xb9, + 0xa9, 0x2d, 0x2a, 0x4a, 0xa2, 0x22, 0x9b, 0x16, 0xf0, 0xac, 0x0e, 0xdb, 0x00, 0x5c, 0x50, 0x7f, + 0x8c, 0x89, 0x60, 0x81, 0x6b, 0x16, 0x35, 0xab, 0xa6, 0x24, 0x02, 0xe2, 0xb1, 0x8a, 0x33, 0x0c, + 0x78, 0x0c, 0x4a, 0x73, 0x6e, 0x49, 0x73, 0x81, 0x92, 0xa8, 0xc4, 0x67, 0xbc, 0x39, 0x02, 0x3f, + 0x05, 0xb5, 0xd9, 0xd3, 0xb7, 0xa1, 0xc3, 0x86, 0x8c, 0x72, 0xb3, 0xdc, 0x30, 0x9a, 0x1b, 0x5d, + 0xa8, 0x24, 0xaa, 0xf1, 0x1c, 0x82, 0x97, 0x98, 0xf0, 0x04, 0x54, 0x31, 0xfd, 0x85, 0x70, 0xe7, + 0xc4, 0x71, 0x38, 0x8d, 0x22, 0xf3, 0x95, 0x3e, 0xa6, 0x77, 0x94, 0x44, 0x6f, 0xf3, 0x2c, 0xf0, + 0x51, 0xe8, 0xb3, 0x69, 0x8f, 0xe2, 0x0a, 0xe7, 0x15, 0xf0, 0x13, 0x50, 0x3d, 0xa3, 0xc4, 0xa1, + 0xbc, 0x1f, 0xdb, 0xf6, 0xd4, 0xa2, 0xa2, 0x3b, 0xdd, 0x55, 0x12, 0x55, 0xbd, 0x2c, 0x80, 0xf3, + 0xbc, 0x85, 0xf0, 0x94, 0x30, 0x2f, 0xe6, 0xd4, 0x04, 0xcb, 0xc2, 0x39, 0x80, 0xf3, 0x3c, 0xf8, + 0x25, 0xd8, 0x79, 0x1c, 0x74, 0xba, 0xe9, 0x96, 0xd6, 0xee, 0x2b, 0x89, 0x76, 0x26, 0x4b, 0x18, + 0x5e, 0x61, 0xe7, 0x1c, 0xd2, 0xdd, 0xb7, 0xd7, 0x38, 0xa4, 0x0d, 0xac, 0xb0, 0xe1, 0x4f, 0xa0, + 0xbe, 0xb8, 0x6c, 0x6e, 0x10, 0x72, 0xea, 0xf4, 0x99, 0x1b, 0x10, 0x11, 0x73, 0x1a, 0x99, 0x55, + 0xed, 0x65, 0x29, 0x89, 0xea, 0x93, 0x27, 0x59, 0xf8, 0x19, 0x87, 0xa9, 0xff, 0x77, 0xb1, 0xdf, + 0xa7, 0x1e, 0xb5, 0x05, 0x75, 0x7a, 0xc1, 0xbc, 0xf3, 0xae, 0x17, 0xda, 0x3f, 0x47, 0x66, 0x6d, + 0xe1, 0x1f, 0x3c, 0xc9, 0xc2, 0xcf, 0x38, 0xc0, 0x6b, 0x03, 0xbc, 0x3e, 0xb1, 0xed, 0xd8, 0x8f, + 0x3d, 0x22, 0xa8, 0x73, 0x4a, 0x69, 0x64, 0xbe, 0xd6, 
0xb3, 0x1f, 0x2a, 0x89, 0x8e, 0x48, 0x1e, + 0x5a, 0x4c, 0xff, 0xf7, 0x3f, 0xd1, 0xd7, 0x3e, 0x11, 0xa3, 0xce, 0x80, 0xb9, 0xed, 0x5e, 0x20, + 0x3e, 0xcb, 0xfc, 0x49, 0xfd, 0xd8, 0x13, 0x6c, 0x42, 0x79, 0x74, 0xd9, 0xf1, 0x2f, 0x5b, 0xf6, + 0x88, 0xb0, 0xa0, 0x65, 0x87, 0x9c, 0xb6, 0xdc, 0xb0, 0xe3, 0x10, 0x41, 0xda, 0x5d, 0xe6, 0xf6, + 0x02, 0xf1, 0x15, 0x89, 0x04, 0xe5, 0x78, 0x79, 0x7b, 0x78, 0x0a, 0xe0, 0x45, 0x28, 0x88, 0x97, + 0xbf, 0x4d, 0x3b, 0xfa, 0x55, 0x0f, 0x95, 0x44, 0x50, 0xac, 0xa0, 0x78, 0x8d, 0x62, 0xc9, 0x27, + 0x1d, 0xef, 0xee, 0x5a, 0x9f, 0x74, 0xc0, 0x6b, 0x14, 0xf0, 0x7b, 0x70, 0xa0, 0xab, 0x2b, 0x77, + 0x0d, 0x6a, 0xab, 0x23, 0x25, 0xd1, 0x81, 0x58, 0x47, 0xc0, 0xeb, 0x75, 0xab, 0x86, 0x69, 0x6f, + 0x7b, 0x4f, 0x19, 0xa6, 0xed, 0xad, 0xd7, 0x41, 0x1f, 0xa0, 0x3c, 0xb0, 0x7a, 0x13, 0xf7, 0xb5, + 0xf5, 0x7b, 0x4a, 0x22, 0x24, 0x9e, 0xa7, 0xe2, 0xff, 0xf2, 0x82, 0x9f, 0x83, 0xed, 0x73, 0x4e, + 0x27, 0x2c, 0x8c, 0x23, 0x9d, 0x81, 0x07, 0x3a, 0x03, 0xeb, 0x4a, 0xa2, 0xc3, 0x71, 0xa6, 0x9e, + 0x89, 0x8a, 0x1c, 0x7f, 0x1a, 0x36, 0xe9, 0x7a, 0x96, 0x92, 0x87, 0xba, 0x39, 0x1d, 0x36, 0xe3, + 0x2c, 0x90, 0x0d, 0x9b, 0x9c, 0xe2, 0xf8, 0xef, 0x0d, 0x00, 0x75, 0x14, 0xbf, 0x7c, 0xd2, 0x7f, + 0x90, 0x4b, 0x7a, 0x1d, 0xa6, 0x5e, 0xfe, 0xed, 0x5e, 0x28, 0xf3, 0x97, 0x8f, 0xb9, 0xf4, 0xa6, + 0xc7, 0x5c, 0xfe, 0xbf, 0xc7, 0xdc, 0xfd, 0xe2, 0xe6, 0xce, 0x2a, 0xdc, 0xde, 0x59, 0x85, 0x87, + 0x3b, 0xcb, 0xf8, 0x35, 0xb1, 0x8c, 0xdf, 0x12, 0xcb, 0xf8, 0x23, 0xb1, 0x8c, 0x9b, 0xc4, 0x32, + 0x6e, 0x13, 0xcb, 0xf8, 0x2b, 0xb1, 0x8c, 0x7f, 0x12, 0xab, 0xf0, 0x90, 0x58, 0xc6, 0xf5, 0xbd, + 0x55, 0xb8, 0xb9, 0xb7, 0x0a, 0xb7, 0xf7, 0x56, 0xe1, 0xc7, 0x62, 0x24, 0x88, 0xa0, 0x83, 0x92, + 0xfe, 0x26, 0x7f, 0xfc, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x69, 0x2e, 0x1c, 0xe0, 0x07, + 0x00, 0x00, } func (this *ValidatorInfo) Equal(that interface{}) bool { @@ -431,6 +467,12 @@ func (this *ValidatorInfo) Equal(that interface{}) bool { if this.TotalValidatorIgnoredSignatures != that1.TotalValidatorIgnoredSignatures { return false } + if this.PreviousList != that1.PreviousList { + return false + } + if this.PreviousIndex != that1.PreviousIndex { + return false + } return true } func (this *ShardValidatorInfo) Equal(that interface{}) bool { @@ -467,13 +509,19 @@ func (this *ShardValidatorInfo) Equal(that interface{}) bool { if this.TempRating != that1.TempRating { return false } + if this.PreviousList != that1.PreviousList { + return false + } + if this.PreviousIndex != that1.PreviousIndex { + return false + } return true } func (this *ValidatorInfo) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 24) + s := make([]string, 0, 26) s = append(s, "&state.ValidatorInfo{") s = append(s, "PublicKey: "+fmt.Sprintf("%#v", this.PublicKey)+",\n") s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") @@ -495,6 +543,8 @@ func (this *ValidatorInfo) GoString() string { s = append(s, "TotalValidatorSuccess: "+fmt.Sprintf("%#v", this.TotalValidatorSuccess)+",\n") s = append(s, "TotalValidatorFailure: "+fmt.Sprintf("%#v", this.TotalValidatorFailure)+",\n") s = append(s, "TotalValidatorIgnoredSignatures: "+fmt.Sprintf("%#v", this.TotalValidatorIgnoredSignatures)+",\n") + s = append(s, "PreviousList: "+fmt.Sprintf("%#v", this.PreviousList)+",\n") + s = append(s, "PreviousIndex: "+fmt.Sprintf("%#v", this.PreviousIndex)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -502,13 +552,15 @@ func (this *ShardValidatorInfo) GoString() string { if this == nil { return "nil" } - s 
:= make([]string, 0, 9) + s := make([]string, 0, 11) s = append(s, "&state.ShardValidatorInfo{") s = append(s, "PublicKey: "+fmt.Sprintf("%#v", this.PublicKey)+",\n") s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") s = append(s, "List: "+fmt.Sprintf("%#v", this.List)+",\n") s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") s = append(s, "TempRating: "+fmt.Sprintf("%#v", this.TempRating)+",\n") + s = append(s, "PreviousList: "+fmt.Sprintf("%#v", this.PreviousList)+",\n") + s = append(s, "PreviousIndex: "+fmt.Sprintf("%#v", this.PreviousIndex)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -540,6 +592,22 @@ func (m *ValidatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreviousIndex != 0 { + i = encodeVarintValidatorInfo(dAtA, i, uint64(m.PreviousIndex)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if len(m.PreviousList) > 0 { + i -= len(m.PreviousList) + copy(dAtA[i:], m.PreviousList) + i = encodeVarintValidatorInfo(dAtA, i, uint64(len(m.PreviousList))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } if m.TotalValidatorIgnoredSignatures != 0 { i = encodeVarintValidatorInfo(dAtA, i, uint64(m.TotalValidatorIgnoredSignatures)) i-- @@ -686,6 +754,18 @@ func (m *ShardValidatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreviousIndex != 0 { + i = encodeVarintValidatorInfo(dAtA, i, uint64(m.PreviousIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.PreviousList) > 0 { + i -= len(m.PreviousList) + copy(dAtA[i:], m.PreviousList) + i = encodeVarintValidatorInfo(dAtA, i, uint64(len(m.PreviousList))) + i-- + dAtA[i] = 0x32 + } if m.TempRating != 0 { i = encodeVarintValidatorInfo(dAtA, i, uint64(m.TempRating)) i-- @@ -800,6 +880,13 @@ func (m *ValidatorInfo) Size() (n int) { if m.TotalValidatorIgnoredSignatures != 0 { n += 2 + sovValidatorInfo(uint64(m.TotalValidatorIgnoredSignatures)) } + l = len(m.PreviousList) + if l > 0 { + n += 2 + l + sovValidatorInfo(uint64(l)) + } + if m.PreviousIndex != 0 { + n += 2 + sovValidatorInfo(uint64(m.PreviousIndex)) + } return n } @@ -826,6 +913,13 @@ func (m *ShardValidatorInfo) Size() (n int) { if m.TempRating != 0 { n += 1 + sovValidatorInfo(uint64(m.TempRating)) } + l = len(m.PreviousList) + if l > 0 { + n += 1 + l + sovValidatorInfo(uint64(l)) + } + if m.PreviousIndex != 0 { + n += 1 + sovValidatorInfo(uint64(m.PreviousIndex)) + } return n } @@ -860,6 +954,8 @@ func (this *ValidatorInfo) String() string { `TotalValidatorSuccess:` + fmt.Sprintf("%v", this.TotalValidatorSuccess) + `,`, `TotalValidatorFailure:` + fmt.Sprintf("%v", this.TotalValidatorFailure) + `,`, `TotalValidatorIgnoredSignatures:` + fmt.Sprintf("%v", this.TotalValidatorIgnoredSignatures) + `,`, + `PreviousList:` + fmt.Sprintf("%v", this.PreviousList) + `,`, + `PreviousIndex:` + fmt.Sprintf("%v", this.PreviousIndex) + `,`, `}`, }, "") return s @@ -874,6 +970,8 @@ func (this *ShardValidatorInfo) String() string { `List:` + fmt.Sprintf("%v", this.List) + `,`, `Index:` + fmt.Sprintf("%v", this.Index) + `,`, `TempRating:` + fmt.Sprintf("%v", this.TempRating) + `,`, + `PreviousList:` + fmt.Sprintf("%v", this.PreviousList) + `,`, + `PreviousIndex:` + fmt.Sprintf("%v", this.PreviousIndex) + `,`, `}`, }, "") return s @@ -1349,6 +1447,57 @@ func (m *ValidatorInfo) Unmarshal(dAtA []byte) error { break } } + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthValidatorInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthValidatorInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousList = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousIndex", wireType) + } + m.PreviousIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreviousIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipValidatorInfo(dAtA[iNdEx:]) @@ -1525,6 +1674,57 @@ func (m *ShardValidatorInfo) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthValidatorInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthValidatorInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousList = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousIndex", wireType) + } + m.PreviousIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreviousIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipValidatorInfo(dAtA[iNdEx:]) diff --git a/state/validatorInfo.proto b/state/validatorInfo.proto index c6256810091..2df2149d8f5 100644 --- a/state/validatorInfo.proto +++ b/state/validatorInfo.proto @@ -29,13 +29,17 @@ message ValidatorInfo { uint32 TotalValidatorSuccess = 18 [(gogoproto.jsontag) = "totalValidatorSuccess"]; uint32 TotalValidatorFailure = 19 [(gogoproto.jsontag) = "totalValidatorFailure"]; uint32 TotalValidatorIgnoredSignatures = 20 [(gogoproto.jsontag) = "totalValidatorIgnoredSignatures"]; + string PreviousList = 21 [(gogoproto.jsontag) = "previousList,omitempty"]; + uint32 PreviousIndex = 22 [(gogoproto.jsontag) = "previousIndex,omitempty"]; } // ShardValidatorInfo represents the data regarding a validator that is stored in the PeerMiniblocks message ShardValidatorInfo { - bytes PublicKey = 1 [(gogoproto.jsontag) = "publicKey"]; - uint32 ShardId = 2 [(gogoproto.jsontag) = "shardId"]; - string List = 3 [(gogoproto.jsontag) = "list,omitempty"]; - uint32 Index = 4 [(gogoproto.jsontag) = "index"]; - uint32 TempRating = 5 [(gogoproto.jsontag) = "tempRating"]; + bytes PublicKey = 1 [(gogoproto.jsontag) = "publicKey"]; + uint32 ShardId = 2 [(gogoproto.jsontag) = "shardId"]; + string List = 3 [(gogoproto.jsontag) = "list,omitempty"]; + uint32 Index = 4 [(gogoproto.jsontag) = 
"index"]; + uint32 TempRating = 5 [(gogoproto.jsontag) = "tempRating"]; + string PreviousList = 6 [(gogoproto.jsontag) = "previousList,omitempty"]; + uint32 PreviousIndex = 7 [(gogoproto.jsontag) = "previousIndex,omitempty"]; } diff --git a/state/validatorInfo_test.go b/state/validatorInfo_test.go deleted file mode 100644 index 6a6ca0be930..00000000000 --- a/state/validatorInfo_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package state - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/stretchr/testify/assert" -) - -func TestValidatorInfo_IsInterfaceNile(t *testing.T) { - t.Parallel() - - vi := &ValidatorInfo{} - assert.False(t, check.IfNil(vi)) -} diff --git a/state/validatorsInfoMap.go b/state/validatorsInfoMap.go new file mode 100644 index 00000000000..992da3b4556 --- /dev/null +++ b/state/validatorsInfoMap.go @@ -0,0 +1,198 @@ +package state + +import ( + "bytes" + "encoding/hex" + "fmt" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" +) + +type shardValidatorsInfoMap struct { + mutex sync.RWMutex + valInfoMap map[uint32][]ValidatorInfoHandler +} + +// NewShardValidatorsInfoMap creates an instance of shardValidatorsInfoMap which manages a +// map internally +func NewShardValidatorsInfoMap() *shardValidatorsInfoMap { + return &shardValidatorsInfoMap{ + mutex: sync.RWMutex{}, + valInfoMap: make(map[uint32][]ValidatorInfoHandler), + } +} + +// GetAllValidatorsInfo returns a []ValidatorInfoHandler copy with validators from all shards. +func (vi *shardValidatorsInfoMap) GetAllValidatorsInfo() []ValidatorInfoHandler { + ret := make([]ValidatorInfoHandler, 0) + + vi.mutex.RLock() + defer vi.mutex.RUnlock() + + for _, validatorsInShard := range vi.valInfoMap { + validatorsCopy := make([]ValidatorInfoHandler, len(validatorsInShard)) + copy(validatorsCopy, validatorsInShard) + ret = append(ret, validatorsCopy...) + } + + return ret +} + +// GetShardValidatorsInfoMap returns a map copy of internally stored data +func (vi *shardValidatorsInfoMap) GetShardValidatorsInfoMap() map[uint32][]ValidatorInfoHandler { + ret := make(map[uint32][]ValidatorInfoHandler, len(vi.valInfoMap)) + + vi.mutex.RLock() + defer vi.mutex.RUnlock() + + for shardID, validatorsInShard := range vi.valInfoMap { + validatorsCopy := make([]ValidatorInfoHandler, len(validatorsInShard)) + copy(validatorsCopy, validatorsInShard) + ret[shardID] = validatorsCopy + } + + return ret +} + +// Add adds a ValidatorInfoHandler in its corresponding shardID +func (vi *shardValidatorsInfoMap) Add(validator ValidatorInfoHandler) error { + if check.IfNil(validator) { + return ErrNilValidatorInfo + } + + shardID := validator.GetShardId() + vi.mutex.Lock() + vi.valInfoMap[shardID] = append(vi.valInfoMap[shardID], validator) + vi.mutex.Unlock() + + return nil +} + +// GetValidator returns a ValidatorInfoHandler copy with the provided blsKey, +// if it is present in the map, otherwise returns nil +func (vi *shardValidatorsInfoMap) GetValidator(blsKey []byte) ValidatorInfoHandler { + for _, validator := range vi.GetAllValidatorsInfo() { + if bytes.Equal(validator.GetPublicKey(), blsKey) { + return validator.ShallowClone() + } + } + + return nil +} + +// Replace will replace an existing ValidatorInfoHandler with a new one. The old and new validator +// shall be in the same shard. 
If the old validator is not found in the map, an error is returned +func (vi *shardValidatorsInfoMap) Replace(old ValidatorInfoHandler, new ValidatorInfoHandler) error { + if check.IfNil(old) { + return fmt.Errorf("%w for old validator in shardValidatorsInfoMap.Replace", ErrNilValidatorInfo) + } + if check.IfNil(new) { + return fmt.Errorf("%w for new validator in shardValidatorsInfoMap.Replace", ErrNilValidatorInfo) + } + if old.GetShardId() != new.GetShardId() { + return fmt.Errorf("%w when trying to replace %s from shard %v with %s from shard %v", + ErrValidatorsDifferentShards, + hex.EncodeToString(old.GetPublicKey()), + old.GetShardId(), + hex.EncodeToString(new.GetPublicKey()), + new.GetShardId(), + ) + } + + shardID := old.GetShardId() + log.Debug("shardValidatorsInfoMap.Replace", + "old validator", hex.EncodeToString(old.GetPublicKey()), "shard", old.GetShardId(), "list", old.GetList(), + "with new validator", hex.EncodeToString(new.GetPublicKey()), "shard", new.GetShardId(), "list", new.GetList(), + ) + + replaced := vi.ReplaceValidatorByKey(old.GetPublicKey(), new, shardID) + if replaced { + return nil + } + + return fmt.Errorf("old %w: %s when trying to replace it with %s", + ErrValidatorNotFound, + hex.EncodeToString(old.GetPublicKey()), + hex.EncodeToString(new.GetPublicKey()), + ) +} + +// ReplaceValidatorByKey will replace an existing ValidatorInfoHandler with a new one, based on the provided blsKey for the old record. +func (vi *shardValidatorsInfoMap) ReplaceValidatorByKey(oldBlsKey []byte, new ValidatorInfoHandler, shardID uint32) bool { + vi.mutex.Lock() + defer vi.mutex.Unlock() + + for idx, validator := range vi.valInfoMap[shardID] { + if bytes.Equal(validator.GetPublicKey(), oldBlsKey) { + vi.valInfoMap[shardID][idx] = new + return true + } + } + return false +} + +// SetValidatorsInShard resets all validators saved in a specific shard with the provided []ValidatorInfoHandler. +// Before setting them, it checks that provided validators have the same shardID as the one provided. +func (vi *shardValidatorsInfoMap) SetValidatorsInShard(shardID uint32, validators []ValidatorInfoHandler) error { + sameShardValidators := make([]ValidatorInfoHandler, 0, len(validators)) + for idx, validator := range validators { + if check.IfNil(validator) { + return fmt.Errorf("%w in shardValidatorsInfoMap.SetValidatorsInShard at index %d", + ErrNilValidatorInfo, + idx, + ) + } + if validator.GetShardId() != shardID { + return fmt.Errorf("%w, %s is in shard %d, but should be set in shard %d in shardValidatorsInfoMap.SetValidatorsInShard", + ErrValidatorsDifferentShards, + hex.EncodeToString(validator.GetPublicKey()), + validator.GetShardId(), + shardID, + ) + } + sameShardValidators = append(sameShardValidators, validator) + } + + vi.mutex.Lock() + vi.valInfoMap[shardID] = sameShardValidators + vi.mutex.Unlock() + + return nil +} + +// SetValidatorsInShardUnsafe resets all validators saved in a specific shard with the provided ones. +// It does not check that provided validators are in the same shard as provided shard id. +func (vi *shardValidatorsInfoMap) SetValidatorsInShardUnsafe(shardID uint32, validators []ValidatorInfoHandler) { + vi.mutex.Lock() + vi.valInfoMap[shardID] = validators + vi.mutex.Unlock() +} + +// Delete will delete the provided validator from the internally stored map, if found. 
+// The validators slice at the corresponding shardID key will be re-sliced, without reordering +func (vi *shardValidatorsInfoMap) Delete(validator ValidatorInfoHandler) error { + if check.IfNil(validator) { + return ErrNilValidatorInfo + } + + shardID := validator.GetShardId() + vi.DeleteByKey(validator.GetPublicKey(), shardID) + return nil +} + +// DeleteByKey will delete the provided blsKey from the internally stored map, if found. +func (vi *shardValidatorsInfoMap) DeleteByKey(blsKey []byte, shardID uint32) { + vi.mutex.Lock() + defer vi.mutex.Unlock() + + for index, validatorInfo := range vi.valInfoMap[shardID] { + if bytes.Equal(validatorInfo.GetPublicKey(), blsKey) { + length := len(vi.valInfoMap[shardID]) + vi.valInfoMap[shardID][index] = vi.valInfoMap[shardID][length-1] + vi.valInfoMap[shardID][length-1] = nil + vi.valInfoMap[shardID] = vi.valInfoMap[shardID][:length-1] + break + } + } +} diff --git a/state/validatorsInfoMap_test.go b/state/validatorsInfoMap_test.go new file mode 100644 index 00000000000..e90c01993cd --- /dev/null +++ b/state/validatorsInfoMap_test.go @@ -0,0 +1,345 @@ +package state + +import ( + "encoding/hex" + "strconv" + "strings" + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/stretchr/testify/require" +) + +func TestShardValidatorsInfoMap_OperationsWithNilValidators(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + t.Run("add nil validator", func(t *testing.T) { + t.Parallel() + + err := vi.Add(nil) + require.Equal(t, ErrNilValidatorInfo, err) + }) + + t.Run("delete nil validator", func(t *testing.T) { + t.Parallel() + + err := vi.Delete(nil) + require.Equal(t, ErrNilValidatorInfo, err) + }) + + t.Run("replace nil validator", func(t *testing.T) { + t.Parallel() + + err := vi.Replace(nil, &ValidatorInfo{}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrNilValidatorInfo.Error())) + require.True(t, strings.Contains(err.Error(), "old")) + + err = vi.Replace(&ValidatorInfo{}, nil) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrNilValidatorInfo.Error())) + require.True(t, strings.Contains(err.Error(), "new")) + }) + + t.Run("set nil validators in shard", func(t *testing.T) { + t.Parallel() + + v := &ValidatorInfo{ShardId: 3, PublicKey: []byte("pk")} + err := vi.SetValidatorsInShard(3, []ValidatorInfoHandler{v, nil}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrNilValidatorInfo.Error())) + require.True(t, strings.Contains(err.Error(), "index 1")) + }) +} + +func TestShardValidatorsInfoMap_Add_GetShardValidatorsInfoMap_GetAllValidatorsInfo_GetValInfoPointerMap(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + v2 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk2")} + v3 := &ValidatorInfo{ShardId: core.MetachainShardId, PublicKey: []byte("pk3")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + _ = vi.Add(v2) + _ = vi.Add(v3) + + allValidators := vi.GetAllValidatorsInfo() + require.Len(t, allValidators, 4) + require.Contains(t, allValidators, v0) + require.Contains(t, allValidators, v1) + require.Contains(t, allValidators, v2) + require.Contains(t, allValidators, v3) + + validatorsMap := vi.GetShardValidatorsInfoMap() + expectedValidatorsMap := map[uint32][]ValidatorInfoHandler{ + 0: {v0, v1}, + 1: {v2}, + core.MetachainShardId: {v3}, + } + require.Equal(t, 
validatorsMap, expectedValidatorsMap) +} + +func TestShardValidatorsInfoMap_GetValidator(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + pubKey0 := []byte("pk0") + pubKey1 := []byte("pk1") + v0 := &ValidatorInfo{ShardId: 0, PublicKey: pubKey0} + v1 := &ValidatorInfo{ShardId: 1, PublicKey: pubKey1} + + _ = vi.Add(v0) + _ = vi.Add(v1) + + require.Equal(t, v0, vi.GetValidator(pubKey0)) + require.Equal(t, v1, vi.GetValidator(pubKey1)) + require.Nil(t, vi.GetValidator([]byte("pk2"))) +} + +func TestShardValidatorsInfoMap_Delete(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + v2 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk2")} + v3 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk3")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + _ = vi.Add(v2) + _ = vi.Add(v3) + + _ = vi.Delete(&ValidatorInfo{ShardId: 0, PublicKey: []byte("pk3")}) + _ = vi.Delete(&ValidatorInfo{ShardId: 1, PublicKey: []byte("pk0")}) + require.Len(t, vi.GetAllValidatorsInfo(), 4) + + _ = vi.Delete(v1) + require.Len(t, vi.GetAllValidatorsInfo(), 3) + require.Equal(t, []ValidatorInfoHandler{v0, v2}, vi.GetShardValidatorsInfoMap()[0]) + require.Equal(t, []ValidatorInfoHandler{v3}, vi.GetShardValidatorsInfoMap()[1]) + + _ = vi.Delete(v3) + require.Len(t, vi.GetAllValidatorsInfo(), 2) + require.Equal(t, []ValidatorInfoHandler{v0, v2}, vi.GetShardValidatorsInfoMap()[0]) +} + +func TestShardValidatorsInfoMap_Replace(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + + err := vi.Replace(v0, &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk2")}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorsDifferentShards.Error())) + require.Equal(t, []ValidatorInfoHandler{v0, v1}, vi.GetShardValidatorsInfoMap()[0]) + + v2 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk2")} + err = vi.Replace(v0, v2) + require.Nil(t, err) + require.Equal(t, []ValidatorInfoHandler{v2, v1}, vi.GetShardValidatorsInfoMap()[0]) + + v3 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk3")} + v4 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk4")} + err = vi.Replace(v3, v4) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorNotFound.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v3.PublicKey))) + require.Equal(t, []ValidatorInfoHandler{v2, v1}, vi.GetShardValidatorsInfoMap()[0]) +} + +func TestShardValidatorsInfoMap_SetValidatorsInShard(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + _ = vi.Add(v0) + + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + v2 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk2")} + v3 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk3")} + shard0Validators := []ValidatorInfoHandler{v1, v2} + shard1Validators := []ValidatorInfoHandler{v3} + + err := vi.SetValidatorsInShard(1, shard0Validators) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorsDifferentShards.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v1.PublicKey))) + require.Equal(t, []ValidatorInfoHandler{v0}, vi.GetShardValidatorsInfoMap()[0]) + require.Empty(t, 
vi.GetShardValidatorsInfoMap()[1]) + + err = vi.SetValidatorsInShard(0, []ValidatorInfoHandler{v1, v2, v3}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorsDifferentShards.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v3.PublicKey))) + require.Equal(t, []ValidatorInfoHandler{v0}, vi.GetShardValidatorsInfoMap()[0]) + require.Empty(t, vi.GetShardValidatorsInfoMap()[1]) + + err = vi.SetValidatorsInShard(0, shard0Validators) + require.Nil(t, err) + require.Equal(t, shard0Validators, vi.GetShardValidatorsInfoMap()[0]) + + err = vi.SetValidatorsInShard(1, shard1Validators) + require.Nil(t, err) + require.Equal(t, shard1Validators, vi.GetShardValidatorsInfoMap()[1]) +} + +func TestShardValidatorsInfoMap_GettersShouldReturnCopiesOfInternalData(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk1")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + + validatorsMap := vi.GetShardValidatorsInfoMap() + delete(validatorsMap, 0) + validatorsMap[1][0].SetPublicKey([]byte("rnd")) + + validators := vi.GetAllValidatorsInfo() + validators = append(validators, &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk3")}) + + validator := vi.GetValidator([]byte("pk0")) + require.False(t, validator == v0) // require not same pointer + validator.SetShardId(2) + + require.Len(t, vi.GetAllValidatorsInfo(), 2) + require.True(t, vi.GetShardValidatorsInfoMap()[0][0] == v0) // check by pointer + require.True(t, vi.GetShardValidatorsInfoMap()[1][0] == v1) // check by pointer + require.NotEqual(t, vi.GetAllValidatorsInfo(), validators) +} + +func TestShardValidatorsInfoMap_Concurrency(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + numValidatorsShard0 := 100 + numValidatorsShard1 := 50 + numValidators := numValidatorsShard0 + numValidatorsShard1 + + shard0Validators := createValidatorsInfo(0, numValidatorsShard0) + shard1Validators := createValidatorsInfo(1, numValidatorsShard1) + + firstHalfShard0 := shard0Validators[:numValidatorsShard0/2] + secondHalfShard0 := shard0Validators[numValidatorsShard0/2:] + + firstHalfShard1 := shard1Validators[:numValidatorsShard1/2] + secondHalfShard1 := shard1Validators[numValidatorsShard1/2:] + + wg := &sync.WaitGroup{} + + wg.Add(numValidators) + go addValidatorsInShardConcurrently(vi, shard0Validators, wg) + go addValidatorsInShardConcurrently(vi, shard1Validators, wg) + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], shard0Validators) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], shard1Validators) + + wg.Add(numValidators / 2) + go deleteValidatorsConcurrently(vi, firstHalfShard0, wg) + go deleteValidatorsConcurrently(vi, firstHalfShard1, wg) + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], secondHalfShard0) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], secondHalfShard1) + + wg.Add(numValidators / 2) + go replaceValidatorsConcurrently(vi, vi.GetShardValidatorsInfoMap()[0], firstHalfShard0, wg) + go replaceValidatorsConcurrently(vi, vi.GetShardValidatorsInfoMap()[1], firstHalfShard1, wg) + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], firstHalfShard0) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], firstHalfShard1) + + wg.Add(2) + go func() { + _ = vi.SetValidatorsInShard(0, 
shard0Validators) + wg.Done() + }() + go func() { + _ = vi.SetValidatorsInShard(1, shard1Validators) + wg.Done() + }() + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], shard0Validators) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], shard1Validators) +} + +func requireSameValidatorsDifferentOrder(t *testing.T, dest []ValidatorInfoHandler, src []ValidatorInfoHandler) { + require.Equal(t, len(dest), len(src)) + + for _, v := range src { + require.Contains(t, dest, v) + } +} + +func createValidatorsInfo(shardID uint32, numOfValidators int) []ValidatorInfoHandler { + ret := make([]ValidatorInfoHandler, 0, numOfValidators) + + for i := 0; i < numOfValidators; i++ { + ret = append(ret, &ValidatorInfo{ + ShardId: shardID, + PublicKey: []byte(strconv.Itoa(int(shardID)) + "pubKey" + strconv.Itoa(i)), + }) + } + + return ret +} + +func addValidatorsInShardConcurrently( + vi ShardValidatorsInfoMapHandler, + validators []ValidatorInfoHandler, + wg *sync.WaitGroup, +) { + for _, validator := range validators { + go func(val ValidatorInfoHandler) { + _ = vi.Add(val) + wg.Done() + }(validator) + } +} + +func deleteValidatorsConcurrently( + vi ShardValidatorsInfoMapHandler, + validators []ValidatorInfoHandler, + wg *sync.WaitGroup, +) { + for _, validator := range validators { + go func(val ValidatorInfoHandler) { + _ = vi.Delete(val) + wg.Done() + }(validator) + } +} + +func replaceValidatorsConcurrently( + vi ShardValidatorsInfoMapHandler, + oldValidators []ValidatorInfoHandler, + newValidators []ValidatorInfoHandler, + wg *sync.WaitGroup, +) { + for idx := range oldValidators { + go func(old ValidatorInfoHandler, new ValidatorInfoHandler) { + _ = vi.Replace(old, new) + wg.Done() + }(oldValidators[idx], newValidators[idx]) + } +} diff --git a/statusHandler/persister/persistentHandler.go b/statusHandler/persister/persistentHandler.go index b2d9c750082..93561363247 100644 --- a/statusHandler/persister/persistentHandler.go +++ b/statusHandler/persister/persistentHandler.go @@ -58,6 +58,7 @@ func (psh *PersistentStatusHandler) initMap() { psh.persistentMetrics.Store(common.MetricNumProcessedTxs, initUint) psh.persistentMetrics.Store(common.MetricNumShardHeadersProcessed, initUint) psh.persistentMetrics.Store(common.MetricNonce, initUint) + psh.persistentMetrics.Store(common.MetricBlockTimestamp, initUint) psh.persistentMetrics.Store(common.MetricCurrentRound, initUint) psh.persistentMetrics.Store(common.MetricNonceAtEpochStart, initUint) psh.persistentMetrics.Store(common.MetricRoundAtEpochStart, initUint) diff --git a/statusHandler/statusMetricsProvider.go b/statusHandler/statusMetricsProvider.go index c4211e889a2..30ead1e5749 100644 --- a/statusHandler/statusMetricsProvider.go +++ b/statusHandler/statusMetricsProvider.go @@ -295,8 +295,89 @@ func (sm *statusMetrics) EnableEpochsMetrics() (map[string]interface{}, error) { enableEpochsMetrics[common.MetricDelegationSmartContractEnableEpoch] = sm.uint64Metrics[common.MetricDelegationSmartContractEnableEpoch] enableEpochsMetrics[common.MetricIncrementSCRNonceInMultiTransferEnableEpoch] = sm.uint64Metrics[common.MetricIncrementSCRNonceInMultiTransferEnableEpoch] enableEpochsMetrics[common.MetricBalanceWaitingListsEnableEpoch] = sm.uint64Metrics[common.MetricBalanceWaitingListsEnableEpoch] - enableEpochsMetrics[common.MetricWaitingListFixEnableEpoch] = sm.uint64Metrics[common.MetricWaitingListFixEnableEpoch] + enableEpochsMetrics[common.MetricCorrectLastUnjailedEnableEpoch] = 
sm.uint64Metrics[common.MetricCorrectLastUnjailedEnableEpoch] + enableEpochsMetrics[common.MetricReturnDataToLastTransferEnableEpoch] = sm.uint64Metrics[common.MetricReturnDataToLastTransferEnableEpoch] + enableEpochsMetrics[common.MetricSenderInOutTransferEnableEpoch] = sm.uint64Metrics[common.MetricSenderInOutTransferEnableEpoch] + enableEpochsMetrics[common.MetricRelayedTransactionsV2EnableEpoch] = sm.uint64Metrics[common.MetricRelayedTransactionsV2EnableEpoch] + enableEpochsMetrics[common.MetricUnbondTokensV2EnableEpoch] = sm.uint64Metrics[common.MetricUnbondTokensV2EnableEpoch] + enableEpochsMetrics[common.MetricSaveJailedAlwaysEnableEpoch] = sm.uint64Metrics[common.MetricSaveJailedAlwaysEnableEpoch] + enableEpochsMetrics[common.MetricValidatorToDelegationEnableEpoch] = sm.uint64Metrics[common.MetricValidatorToDelegationEnableEpoch] + enableEpochsMetrics[common.MetricReDelegateBelowMinCheckEnableEpoch] = sm.uint64Metrics[common.MetricReDelegateBelowMinCheckEnableEpoch] + enableEpochsMetrics[common.MetricScheduledMiniBlocksEnableEpoch] = sm.uint64Metrics[common.MetricScheduledMiniBlocksEnableEpoch] + enableEpochsMetrics[common.MetricESDTMultiTransferEnableEpoch] = sm.uint64Metrics[common.MetricESDTMultiTransferEnableEpoch] + enableEpochsMetrics[common.MetricGlobalMintBurnDisableEpoch] = sm.uint64Metrics[common.MetricGlobalMintBurnDisableEpoch] + enableEpochsMetrics[common.MetricESDTTransferRoleEnableEpoch] = sm.uint64Metrics[common.MetricESDTTransferRoleEnableEpoch] + enableEpochsMetrics[common.MetricComputeRewardCheckpointEnableEpoch] = sm.uint64Metrics[common.MetricComputeRewardCheckpointEnableEpoch] + enableEpochsMetrics[common.MetricSCRSizeInvariantCheckEnableEpoch] = sm.uint64Metrics[common.MetricSCRSizeInvariantCheckEnableEpoch] + enableEpochsMetrics[common.MetricBackwardCompSaveKeyValueEnableEpoch] = sm.uint64Metrics[common.MetricBackwardCompSaveKeyValueEnableEpoch] + enableEpochsMetrics[common.MetricESDTNFTCreateOnMultiShardEnableEpoch] = sm.uint64Metrics[common.MetricESDTNFTCreateOnMultiShardEnableEpoch] + enableEpochsMetrics[common.MetricMetaESDTSetEnableEpoch] = sm.uint64Metrics[common.MetricMetaESDTSetEnableEpoch] + enableEpochsMetrics[common.MetricAddTokensToDelegationEnableEpoch] = sm.uint64Metrics[common.MetricAddTokensToDelegationEnableEpoch] + enableEpochsMetrics[common.MetricMultiESDTTransferFixOnCallBackOnEnableEpoch] = sm.uint64Metrics[common.MetricMultiESDTTransferFixOnCallBackOnEnableEpoch] + enableEpochsMetrics[common.MetricOptimizeGasUsedInCrossMiniBlocksEnableEpoch] = sm.uint64Metrics[common.MetricOptimizeGasUsedInCrossMiniBlocksEnableEpoch] + enableEpochsMetrics[common.MetricCorrectFirstQueuedEpoch] = sm.uint64Metrics[common.MetricCorrectFirstQueuedEpoch] + enableEpochsMetrics[common.MetricCorrectJailedNotUnstakedEmptyQueueEpoch] = sm.uint64Metrics[common.MetricCorrectJailedNotUnstakedEmptyQueueEpoch] + enableEpochsMetrics[common.MetricFixOOGReturnCodeEnableEpoch] = sm.uint64Metrics[common.MetricFixOOGReturnCodeEnableEpoch] + enableEpochsMetrics[common.MetricRemoveNonUpdatedStorageEnableEpoch] = sm.uint64Metrics[common.MetricRemoveNonUpdatedStorageEnableEpoch] + enableEpochsMetrics[common.MetricDeleteDelegatorAfterClaimRewardsEnableEpoch] = sm.uint64Metrics[common.MetricDeleteDelegatorAfterClaimRewardsEnableEpoch] + enableEpochsMetrics[common.MetricOptimizeNFTStoreEnableEpoch] = sm.uint64Metrics[common.MetricOptimizeNFTStoreEnableEpoch] + enableEpochsMetrics[common.MetricCreateNFTThroughExecByCallerEnableEpoch] = 
sm.uint64Metrics[common.MetricCreateNFTThroughExecByCallerEnableEpoch] + enableEpochsMetrics[common.MetricStopDecreasingValidatorRatingWhenStuckEnableEpoch] = sm.uint64Metrics[common.MetricStopDecreasingValidatorRatingWhenStuckEnableEpoch] + enableEpochsMetrics[common.MetricFrontRunningProtectionEnableEpoch] = sm.uint64Metrics[common.MetricFrontRunningProtectionEnableEpoch] + enableEpochsMetrics[common.MetricIsPayableBySCEnableEpoch] = sm.uint64Metrics[common.MetricIsPayableBySCEnableEpoch] + enableEpochsMetrics[common.MetricCleanUpInformativeSCRsEnableEpoch] = sm.uint64Metrics[common.MetricCleanUpInformativeSCRsEnableEpoch] + enableEpochsMetrics[common.MetricStorageAPICostOptimizationEnableEpoch] = sm.uint64Metrics[common.MetricStorageAPICostOptimizationEnableEpoch] + enableEpochsMetrics[common.MetricTransformToMultiShardCreateEnableEpoch] = sm.uint64Metrics[common.MetricTransformToMultiShardCreateEnableEpoch] + enableEpochsMetrics[common.MetricESDTRegisterAndSetAllRolesEnableEpoch] = sm.uint64Metrics[common.MetricESDTRegisterAndSetAllRolesEnableEpoch] + enableEpochsMetrics[common.MetricDoNotReturnOldBlockInBlockchainHookEnableEpoch] = sm.uint64Metrics[common.MetricDoNotReturnOldBlockInBlockchainHookEnableEpoch] + enableEpochsMetrics[common.MetricAddFailedRelayedTxToInvalidMBsDisableEpoch] = sm.uint64Metrics[common.MetricAddFailedRelayedTxToInvalidMBsDisableEpoch] + enableEpochsMetrics[common.MetricSCRSizeInvariantOnBuiltInResultEnableEpoch] = sm.uint64Metrics[common.MetricSCRSizeInvariantOnBuiltInResultEnableEpoch] + enableEpochsMetrics[common.MetricCheckCorrectTokenIDForTransferRoleEnableEpoch] = sm.uint64Metrics[common.MetricCheckCorrectTokenIDForTransferRoleEnableEpoch] + enableEpochsMetrics[common.MetricDisableExecByCallerEnableEpoch] = sm.uint64Metrics[common.MetricDisableExecByCallerEnableEpoch] + enableEpochsMetrics[common.MetricFailExecutionOnEveryAPIErrorEnableEpoch] = sm.uint64Metrics[common.MetricFailExecutionOnEveryAPIErrorEnableEpoch] + enableEpochsMetrics[common.MetricManagedCryptoAPIsEnableEpoch] = sm.uint64Metrics[common.MetricManagedCryptoAPIsEnableEpoch] + enableEpochsMetrics[common.MetricRefactorContextEnableEpoch] = sm.uint64Metrics[common.MetricRefactorContextEnableEpoch] + enableEpochsMetrics[common.MetricCheckFunctionArgumentEnableEpoch] = sm.uint64Metrics[common.MetricCheckFunctionArgumentEnableEpoch] + enableEpochsMetrics[common.MetricCheckExecuteOnReadOnlyEnableEpoch] = sm.uint64Metrics[common.MetricCheckExecuteOnReadOnlyEnableEpoch] + enableEpochsMetrics[common.MetricMiniBlockPartialExecutionEnableEpoch] = sm.uint64Metrics[common.MetricMiniBlockPartialExecutionEnableEpoch] + enableEpochsMetrics[common.MetricESDTMetadataContinuousCleanupEnableEpoch] = sm.uint64Metrics[common.MetricESDTMetadataContinuousCleanupEnableEpoch] + enableEpochsMetrics[common.MetricFixAsyncCallBackArgsListEnableEpoch] = sm.uint64Metrics[common.MetricFixAsyncCallBackArgsListEnableEpoch] + enableEpochsMetrics[common.MetricFixOldTokenLiquidityEnableEpoch] = sm.uint64Metrics[common.MetricFixOldTokenLiquidityEnableEpoch] + enableEpochsMetrics[common.MetricRuntimeMemStoreLimitEnableEpoch] = sm.uint64Metrics[common.MetricRuntimeMemStoreLimitEnableEpoch] + enableEpochsMetrics[common.MetricRuntimeCodeSizeFixEnableEpoch] = sm.uint64Metrics[common.MetricRuntimeCodeSizeFixEnableEpoch] + enableEpochsMetrics[common.MetricSetSenderInEeiOutputTransferEnableEpoch] = sm.uint64Metrics[common.MetricSetSenderInEeiOutputTransferEnableEpoch] + 
enableEpochsMetrics[common.MetricRefactorPeersMiniBlocksEnableEpoch] = sm.uint64Metrics[common.MetricRefactorPeersMiniBlocksEnableEpoch] + enableEpochsMetrics[common.MetricSCProcessorV2EnableEpoch] = sm.uint64Metrics[common.MetricSCProcessorV2EnableEpoch] + enableEpochsMetrics[common.MetricMaxBlockchainHookCountersEnableEpoch] = sm.uint64Metrics[common.MetricMaxBlockchainHookCountersEnableEpoch] + enableEpochsMetrics[common.MetricWipeSingleNFTLiquidityDecreaseEnableEpoch] = sm.uint64Metrics[common.MetricWipeSingleNFTLiquidityDecreaseEnableEpoch] + enableEpochsMetrics[common.MetricAlwaysSaveTokenMetaDataEnableEpoch] = sm.uint64Metrics[common.MetricAlwaysSaveTokenMetaDataEnableEpoch] + enableEpochsMetrics[common.MetricCleanUpInformativeSCRsEnableEpoch] = sm.uint64Metrics[common.MetricCleanUpInformativeSCRsEnableEpoch] enableEpochsMetrics[common.MetricSetGuardianEnableEpoch] = sm.uint64Metrics[common.MetricSetGuardianEnableEpoch] + enableEpochsMetrics[common.MetricSetScToScLogEventEnableEpoch] = sm.uint64Metrics[common.MetricSetScToScLogEventEnableEpoch] + enableEpochsMetrics[common.MetricRelayedNonceFixEnableEpoch] = sm.uint64Metrics[common.MetricRelayedNonceFixEnableEpoch] + enableEpochsMetrics[common.MetricDeterministicSortOnValidatorsInfoEnableEpoch] = sm.uint64Metrics[common.MetricDeterministicSortOnValidatorsInfoEnableEpoch] + enableEpochsMetrics[common.MetricKeepExecOrderOnCreatedSCRsEnableEpoch] = sm.uint64Metrics[common.MetricKeepExecOrderOnCreatedSCRsEnableEpoch] + enableEpochsMetrics[common.MetricMultiClaimOnDelegationEnableEpoch] = sm.uint64Metrics[common.MetricMultiClaimOnDelegationEnableEpoch] + enableEpochsMetrics[common.MetricChangeUsernameEnableEpoch] = sm.uint64Metrics[common.MetricChangeUsernameEnableEpoch] + enableEpochsMetrics[common.MetricAutoBalanceDataTriesEnableEpoch] = sm.uint64Metrics[common.MetricAutoBalanceDataTriesEnableEpoch] + enableEpochsMetrics[common.MetricMigrateDataTrieEnableEpoch] = sm.uint64Metrics[common.MetricMigrateDataTrieEnableEpoch] + enableEpochsMetrics[common.MetricConsistentTokensValuesLengthCheckEnableEpoch] = sm.uint64Metrics[common.MetricConsistentTokensValuesLengthCheckEnableEpoch] + enableEpochsMetrics[common.MetricFixDelegationChangeOwnerOnAccountEnableEpoch] = sm.uint64Metrics[common.MetricFixDelegationChangeOwnerOnAccountEnableEpoch] + enableEpochsMetrics[common.MetricDynamicGasCostForDataTrieStorageLoadEnableEpoch] = sm.uint64Metrics[common.MetricDynamicGasCostForDataTrieStorageLoadEnableEpoch] + enableEpochsMetrics[common.MetricNFTStopCreateEnableEpoch] = sm.uint64Metrics[common.MetricNFTStopCreateEnableEpoch] + enableEpochsMetrics[common.MetricChangeOwnerAddressCrossShardThroughSCEnableEpoch] = sm.uint64Metrics[common.MetricChangeOwnerAddressCrossShardThroughSCEnableEpoch] + enableEpochsMetrics[common.MetricFixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch] = sm.uint64Metrics[common.MetricFixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch] + enableEpochsMetrics[common.MetricCurrentRandomnessOnSortingEnableEpoch] = sm.uint64Metrics[common.MetricCurrentRandomnessOnSortingEnableEpoch] + enableEpochsMetrics[common.MetricStakeLimitsEnableEpoch] = sm.uint64Metrics[common.MetricStakeLimitsEnableEpoch] + enableEpochsMetrics[common.MetricStakingV4Step1EnableEpoch] = sm.uint64Metrics[common.MetricStakingV4Step1EnableEpoch] + enableEpochsMetrics[common.MetricStakingV4Step2EnableEpoch] = sm.uint64Metrics[common.MetricStakingV4Step2EnableEpoch] + enableEpochsMetrics[common.MetricStakingV4Step3EnableEpoch] = 
sm.uint64Metrics[common.MetricStakingV4Step3EnableEpoch] + enableEpochsMetrics[common.MetricCleanupAuctionOnLowWaitingListEnableEpoch] = sm.uint64Metrics[common.MetricCleanupAuctionOnLowWaitingListEnableEpoch] + enableEpochsMetrics[common.MetricAlwaysMergeContextsInEEIEnableEpoch] = sm.uint64Metrics[common.MetricAlwaysMergeContextsInEEIEnableEpoch] + enableEpochsMetrics[common.MetricDynamicESDTEnableEpoch] = sm.uint64Metrics[common.MetricDynamicESDTEnableEpoch] + enableEpochsMetrics[common.MetricEGLDInMultiTransferEnableEpoch] = sm.uint64Metrics[common.MetricEGLDInMultiTransferEnableEpoch] + enableEpochsMetrics[common.MetricCryptoOpcodesV2EnableEpoch] = sm.uint64Metrics[common.MetricCryptoOpcodesV2EnableEpoch] + enableEpochsMetrics[common.MetricMultiESDTNFTTransferAndExecuteByUserEnableEpoch] = sm.uint64Metrics[common.MetricMultiESDTNFTTransferAndExecuteByUserEnableEpoch] numNodesChangeConfig := sm.uint64Metrics[common.MetricMaxNodesChangeEnableEpoch+"_count"] @@ -341,6 +422,7 @@ func (sm *statusMetrics) saveUint64NetworkMetricsInMap(networkMetrics map[string currentNonce := sm.uint64Metrics[common.MetricNonce] nonceAtEpochStart := sm.uint64Metrics[common.MetricNonceAtEpochStart] networkMetrics[common.MetricNonce] = currentNonce + networkMetrics[common.MetricBlockTimestamp] = sm.uint64Metrics[common.MetricBlockTimestamp] networkMetrics[common.MetricHighestFinalBlock] = sm.uint64Metrics[common.MetricHighestFinalBlock] networkMetrics[common.MetricCurrentRound] = currentRound networkMetrics[common.MetricRoundAtEpochStart] = roundNumberAtEpochStart diff --git a/statusHandler/statusMetricsProvider_test.go b/statusHandler/statusMetricsProvider_test.go index 12831f384c6..02f33d62549 100644 --- a/statusHandler/statusMetricsProvider_test.go +++ b/statusHandler/statusMetricsProvider_test.go @@ -231,6 +231,7 @@ func TestStatusMetrics_NetworkMetrics(t *testing.T) { sm.SetUInt64Value(common.MetricCurrentRound, 200) sm.SetUInt64Value(common.MetricRoundAtEpochStart, 100) sm.SetUInt64Value(common.MetricNonce, 180) + sm.SetUInt64Value(common.MetricBlockTimestamp, 18000) sm.SetUInt64Value(common.MetricHighestFinalBlock, 181) sm.SetUInt64Value(common.MetricNonceAtEpochStart, 95) sm.SetUInt64Value(common.MetricEpochNumber, 1) @@ -240,6 +241,7 @@ func TestStatusMetrics_NetworkMetrics(t *testing.T) { "erd_current_round": uint64(200), "erd_round_at_epoch_start": uint64(100), "erd_nonce": uint64(180), + "erd_block_timestamp": uint64(18000), "erd_highest_final_nonce": uint64(181), "erd_nonce_at_epoch_start": uint64(95), "erd_epoch_number": uint64(1), @@ -270,6 +272,7 @@ func TestStatusMetrics_StatusMetricsMapWithoutP2P(t *testing.T) { sm.SetUInt64Value(common.MetricCurrentRound, 100) sm.SetUInt64Value(common.MetricRoundAtEpochStart, 200) sm.SetUInt64Value(common.MetricNonce, 300) + sm.SetUInt64Value(common.MetricBlockTimestamp, 30000) sm.SetStringValue(common.MetricAppVersion, "400") sm.SetUInt64Value(common.MetricRoundsPassedInCurrentEpoch, 95) sm.SetUInt64Value(common.MetricNoncesPassedInCurrentEpoch, 1) @@ -281,6 +284,7 @@ func TestStatusMetrics_StatusMetricsMapWithoutP2P(t *testing.T) { require.Equal(t, uint64(100), res[common.MetricCurrentRound]) require.Equal(t, uint64(200), res[common.MetricRoundAtEpochStart]) require.Equal(t, uint64(300), res[common.MetricNonce]) + require.Equal(t, uint64(30000), res[common.MetricBlockTimestamp]) require.Equal(t, "400", res[common.MetricAppVersion]) require.NotContains(t, res, common.MetricRoundsPassedInCurrentEpoch) require.NotContains(t, res, 
common.MetricNoncesPassedInCurrentEpoch) @@ -293,30 +297,110 @@ func TestStatusMetrics_EnableEpochMetrics(t *testing.T) { sm := statusHandler.NewStatusMetrics() - sm.SetUInt64Value(common.MetricScDeployEnableEpoch, 4) - sm.SetUInt64Value(common.MetricBuiltInFunctionsEnableEpoch, 2) - sm.SetUInt64Value(common.MetricRelayedTransactionsEnableEpoch, 4) - sm.SetUInt64Value(common.MetricPenalizedTooMuchGasEnableEpoch, 2) - sm.SetUInt64Value(common.MetricSwitchJailWaitingEnableEpoch, 2) - sm.SetUInt64Value(common.MetricSwitchHysteresisForMinNodesEnableEpoch, 4) - sm.SetUInt64Value(common.MetricBelowSignedThresholdEnableEpoch, 2) - sm.SetUInt64Value(common.MetricTransactionSignedWithTxHashEnableEpoch, 4) - sm.SetUInt64Value(common.MetricMetaProtectionEnableEpoch, 6) - sm.SetUInt64Value(common.MetricAheadOfTimeGasUsageEnableEpoch, 2) - sm.SetUInt64Value(common.MetricGasPriceModifierEnableEpoch, 2) - sm.SetUInt64Value(common.MetricRepairCallbackEnableEpoch, 2) - sm.SetUInt64Value(common.MetricBlockGasAndFreeRecheckEnableEpoch, 2) - sm.SetUInt64Value(common.MetricStakingV2EnableEpoch, 2) - sm.SetUInt64Value(common.MetricStakeEnableEpoch, 2) - sm.SetUInt64Value(common.MetricDoubleKeyProtectionEnableEpoch, 2) - sm.SetUInt64Value(common.MetricEsdtEnableEpoch, 4) - sm.SetUInt64Value(common.MetricGovernanceEnableEpoch, 3) - sm.SetUInt64Value(common.MetricDelegationManagerEnableEpoch, 1) - sm.SetUInt64Value(common.MetricDelegationSmartContractEnableEpoch, 2) - sm.SetUInt64Value(common.MetricIncrementSCRNonceInMultiTransferEnableEpoch, 3) - sm.SetUInt64Value(common.MetricBalanceWaitingListsEnableEpoch, 4) - sm.SetUInt64Value(common.MetricWaitingListFixEnableEpoch, 1) - sm.SetUInt64Value(common.MetricSetGuardianEnableEpoch, 3) + sm.SetUInt64Value(common.MetricScDeployEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricBuiltInFunctionsEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricRelayedTransactionsEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricPenalizedTooMuchGasEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricSwitchJailWaitingEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricSwitchHysteresisForMinNodesEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricBelowSignedThresholdEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricTransactionSignedWithTxHashEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricMetaProtectionEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricAheadOfTimeGasUsageEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricGasPriceModifierEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricRepairCallbackEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricBlockGasAndFreeRecheckEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricStakingV2EnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricStakeEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricDoubleKeyProtectionEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricEsdtEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricGovernanceEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricDelegationManagerEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricDelegationSmartContractEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricCorrectLastUnjailedEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricBalanceWaitingListsEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricReturnDataToLastTransferEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricSenderInOutTransferEnableEpoch, uint64(4)) + 
sm.SetUInt64Value(common.MetricRelayedTransactionsV2EnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricUnbondTokensV2EnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricSaveJailedAlwaysEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricValidatorToDelegationEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricReDelegateBelowMinCheckEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricIncrementSCRNonceInMultiTransferEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricScheduledMiniBlocksEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricESDTMultiTransferEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricGlobalMintBurnDisableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricESDTTransferRoleEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricComputeRewardCheckpointEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricSCRSizeInvariantCheckEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricBackwardCompSaveKeyValueEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricESDTNFTCreateOnMultiShardEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricMetaESDTSetEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricAddTokensToDelegationEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricMultiESDTTransferFixOnCallBackOnEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricOptimizeGasUsedInCrossMiniBlocksEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricCorrectFirstQueuedEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricCorrectJailedNotUnstakedEmptyQueueEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricFixOOGReturnCodeEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricRemoveNonUpdatedStorageEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricDeleteDelegatorAfterClaimRewardsEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricOptimizeNFTStoreEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricCreateNFTThroughExecByCallerEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricStopDecreasingValidatorRatingWhenStuckEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricFrontRunningProtectionEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricIsPayableBySCEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricStorageAPICostOptimizationEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricTransformToMultiShardCreateEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricESDTRegisterAndSetAllRolesEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricDoNotReturnOldBlockInBlockchainHookEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricAddFailedRelayedTxToInvalidMBsDisableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricSCRSizeInvariantOnBuiltInResultEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricCheckCorrectTokenIDForTransferRoleEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricDisableExecByCallerEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricFailExecutionOnEveryAPIErrorEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricManagedCryptoAPIsEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricRefactorContextEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricCheckFunctionArgumentEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricCheckExecuteOnReadOnlyEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricMiniBlockPartialExecutionEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricESDTMetadataContinuousCleanupEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricFixAsyncCallBackArgsListEnableEpoch, uint64(4)) + 
sm.SetUInt64Value(common.MetricFixOldTokenLiquidityEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricRuntimeMemStoreLimitEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricRuntimeCodeSizeFixEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricSetSenderInEeiOutputTransferEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricRefactorPeersMiniBlocksEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricSCProcessorV2EnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricMaxBlockchainHookCountersEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricWipeSingleNFTLiquidityDecreaseEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricAlwaysSaveTokenMetaDataEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricCleanUpInformativeSCRsEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricSetGuardianEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricSetScToScLogEventEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricRelayedNonceFixEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricDeterministicSortOnValidatorsInfoEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricKeepExecOrderOnCreatedSCRsEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricMultiClaimOnDelegationEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricChangeUsernameEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricAutoBalanceDataTriesEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricMigrateDataTrieEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricConsistentTokensValuesLengthCheckEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricFixDelegationChangeOwnerOnAccountEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricDynamicGasCostForDataTrieStorageLoadEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricNFTStopCreateEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricChangeOwnerAddressCrossShardThroughSCEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricFixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricCurrentRandomnessOnSortingEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricStakeLimitsEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricStakingV4Step1EnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricStakingV4Step2EnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricStakingV4Step3EnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricCleanupAuctionOnLowWaitingListEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricAlwaysMergeContextsInEEIEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricDynamicESDTEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricEGLDInMultiTransferEnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricCryptoOpcodesV2EnableEpoch, uint64(4)) + sm.SetUInt64Value(common.MetricMultiESDTNFTTransferAndExecuteByUserEnableEpoch, uint64(4)) maxNodesChangeConfig := []map[string]uint64{ { @@ -343,30 +427,110 @@ func TestStatusMetrics_EnableEpochMetrics(t *testing.T) { sm.SetUInt64Value(common.MetricMaxNodesChangeEnableEpoch+"_count", uint64(len(maxNodesChangeConfig))) expectedMetrics := map[string]interface{}{ - common.MetricScDeployEnableEpoch: uint64(4), - common.MetricBuiltInFunctionsEnableEpoch: uint64(2), - common.MetricRelayedTransactionsEnableEpoch: uint64(4), - common.MetricPenalizedTooMuchGasEnableEpoch: uint64(2), - common.MetricSwitchJailWaitingEnableEpoch: uint64(2), - common.MetricSwitchHysteresisForMinNodesEnableEpoch: uint64(4), - common.MetricBelowSignedThresholdEnableEpoch: uint64(2), - 
common.MetricTransactionSignedWithTxHashEnableEpoch: uint64(4), - common.MetricMetaProtectionEnableEpoch: uint64(6), - common.MetricAheadOfTimeGasUsageEnableEpoch: uint64(2), - common.MetricGasPriceModifierEnableEpoch: uint64(2), - common.MetricRepairCallbackEnableEpoch: uint64(2), - common.MetricBlockGasAndFreeRecheckEnableEpoch: uint64(2), - common.MetricStakingV2EnableEpoch: uint64(2), - common.MetricStakeEnableEpoch: uint64(2), - common.MetricDoubleKeyProtectionEnableEpoch: uint64(2), - common.MetricEsdtEnableEpoch: uint64(4), - common.MetricGovernanceEnableEpoch: uint64(3), - common.MetricDelegationManagerEnableEpoch: uint64(1), - common.MetricDelegationSmartContractEnableEpoch: uint64(2), - common.MetricIncrementSCRNonceInMultiTransferEnableEpoch: uint64(3), - common.MetricBalanceWaitingListsEnableEpoch: uint64(4), - common.MetricWaitingListFixEnableEpoch: uint64(1), - common.MetricSetGuardianEnableEpoch: uint64(3), + common.MetricScDeployEnableEpoch: uint64(4), + common.MetricBuiltInFunctionsEnableEpoch: uint64(4), + common.MetricRelayedTransactionsEnableEpoch: uint64(4), + common.MetricPenalizedTooMuchGasEnableEpoch: uint64(4), + common.MetricSwitchJailWaitingEnableEpoch: uint64(4), + common.MetricSwitchHysteresisForMinNodesEnableEpoch: uint64(4), + common.MetricBelowSignedThresholdEnableEpoch: uint64(4), + common.MetricTransactionSignedWithTxHashEnableEpoch: uint64(4), + common.MetricMetaProtectionEnableEpoch: uint64(4), + common.MetricAheadOfTimeGasUsageEnableEpoch: uint64(4), + common.MetricGasPriceModifierEnableEpoch: uint64(4), + common.MetricRepairCallbackEnableEpoch: uint64(4), + common.MetricBlockGasAndFreeRecheckEnableEpoch: uint64(4), + common.MetricStakingV2EnableEpoch: uint64(4), + common.MetricStakeEnableEpoch: uint64(4), + common.MetricDoubleKeyProtectionEnableEpoch: uint64(4), + common.MetricEsdtEnableEpoch: uint64(4), + common.MetricGovernanceEnableEpoch: uint64(4), + common.MetricDelegationManagerEnableEpoch: uint64(4), + common.MetricDelegationSmartContractEnableEpoch: uint64(4), + common.MetricCorrectLastUnjailedEnableEpoch: uint64(4), + common.MetricBalanceWaitingListsEnableEpoch: uint64(4), + common.MetricReturnDataToLastTransferEnableEpoch: uint64(4), + common.MetricSenderInOutTransferEnableEpoch: uint64(4), + common.MetricRelayedTransactionsV2EnableEpoch: uint64(4), + common.MetricUnbondTokensV2EnableEpoch: uint64(4), + common.MetricSaveJailedAlwaysEnableEpoch: uint64(4), + common.MetricValidatorToDelegationEnableEpoch: uint64(4), + common.MetricReDelegateBelowMinCheckEnableEpoch: uint64(4), + common.MetricIncrementSCRNonceInMultiTransferEnableEpoch: uint64(4), + common.MetricScheduledMiniBlocksEnableEpoch: uint64(4), + common.MetricESDTMultiTransferEnableEpoch: uint64(4), + common.MetricGlobalMintBurnDisableEpoch: uint64(4), + common.MetricESDTTransferRoleEnableEpoch: uint64(4), + common.MetricComputeRewardCheckpointEnableEpoch: uint64(4), + common.MetricSCRSizeInvariantCheckEnableEpoch: uint64(4), + common.MetricBackwardCompSaveKeyValueEnableEpoch: uint64(4), + common.MetricESDTNFTCreateOnMultiShardEnableEpoch: uint64(4), + common.MetricMetaESDTSetEnableEpoch: uint64(4), + common.MetricAddTokensToDelegationEnableEpoch: uint64(4), + common.MetricMultiESDTTransferFixOnCallBackOnEnableEpoch: uint64(4), + common.MetricOptimizeGasUsedInCrossMiniBlocksEnableEpoch: uint64(4), + common.MetricCorrectFirstQueuedEpoch: uint64(4), + common.MetricCorrectJailedNotUnstakedEmptyQueueEpoch: uint64(4), + common.MetricFixOOGReturnCodeEnableEpoch: uint64(4), + 
common.MetricRemoveNonUpdatedStorageEnableEpoch: uint64(4), + common.MetricDeleteDelegatorAfterClaimRewardsEnableEpoch: uint64(4), + common.MetricOptimizeNFTStoreEnableEpoch: uint64(4), + common.MetricCreateNFTThroughExecByCallerEnableEpoch: uint64(4), + common.MetricStopDecreasingValidatorRatingWhenStuckEnableEpoch: uint64(4), + common.MetricFrontRunningProtectionEnableEpoch: uint64(4), + common.MetricIsPayableBySCEnableEpoch: uint64(4), + common.MetricStorageAPICostOptimizationEnableEpoch: uint64(4), + common.MetricTransformToMultiShardCreateEnableEpoch: uint64(4), + common.MetricESDTRegisterAndSetAllRolesEnableEpoch: uint64(4), + common.MetricDoNotReturnOldBlockInBlockchainHookEnableEpoch: uint64(4), + common.MetricAddFailedRelayedTxToInvalidMBsDisableEpoch: uint64(4), + common.MetricSCRSizeInvariantOnBuiltInResultEnableEpoch: uint64(4), + common.MetricCheckCorrectTokenIDForTransferRoleEnableEpoch: uint64(4), + common.MetricDisableExecByCallerEnableEpoch: uint64(4), + common.MetricFailExecutionOnEveryAPIErrorEnableEpoch: uint64(4), + common.MetricManagedCryptoAPIsEnableEpoch: uint64(4), + common.MetricRefactorContextEnableEpoch: uint64(4), + common.MetricCheckFunctionArgumentEnableEpoch: uint64(4), + common.MetricCheckExecuteOnReadOnlyEnableEpoch: uint64(4), + common.MetricMiniBlockPartialExecutionEnableEpoch: uint64(4), + common.MetricESDTMetadataContinuousCleanupEnableEpoch: uint64(4), + common.MetricFixAsyncCallBackArgsListEnableEpoch: uint64(4), + common.MetricFixOldTokenLiquidityEnableEpoch: uint64(4), + common.MetricRuntimeMemStoreLimitEnableEpoch: uint64(4), + common.MetricRuntimeCodeSizeFixEnableEpoch: uint64(4), + common.MetricSetSenderInEeiOutputTransferEnableEpoch: uint64(4), + common.MetricRefactorPeersMiniBlocksEnableEpoch: uint64(4), + common.MetricSCProcessorV2EnableEpoch: uint64(4), + common.MetricMaxBlockchainHookCountersEnableEpoch: uint64(4), + common.MetricWipeSingleNFTLiquidityDecreaseEnableEpoch: uint64(4), + common.MetricAlwaysSaveTokenMetaDataEnableEpoch: uint64(4), + common.MetricCleanUpInformativeSCRsEnableEpoch: uint64(4), + common.MetricSetGuardianEnableEpoch: uint64(4), + common.MetricSetScToScLogEventEnableEpoch: uint64(4), + common.MetricRelayedNonceFixEnableEpoch: uint64(4), + common.MetricDeterministicSortOnValidatorsInfoEnableEpoch: uint64(4), + common.MetricKeepExecOrderOnCreatedSCRsEnableEpoch: uint64(4), + common.MetricMultiClaimOnDelegationEnableEpoch: uint64(4), + common.MetricChangeUsernameEnableEpoch: uint64(4), + common.MetricAutoBalanceDataTriesEnableEpoch: uint64(4), + common.MetricMigrateDataTrieEnableEpoch: uint64(4), + common.MetricConsistentTokensValuesLengthCheckEnableEpoch: uint64(4), + common.MetricFixDelegationChangeOwnerOnAccountEnableEpoch: uint64(4), + common.MetricDynamicGasCostForDataTrieStorageLoadEnableEpoch: uint64(4), + common.MetricNFTStopCreateEnableEpoch: uint64(4), + common.MetricChangeOwnerAddressCrossShardThroughSCEnableEpoch: uint64(4), + common.MetricFixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: uint64(4), + common.MetricCurrentRandomnessOnSortingEnableEpoch: uint64(4), + common.MetricStakeLimitsEnableEpoch: uint64(4), + common.MetricStakingV4Step1EnableEpoch: uint64(4), + common.MetricStakingV4Step2EnableEpoch: uint64(4), + common.MetricStakingV4Step3EnableEpoch: uint64(4), + common.MetricCleanupAuctionOnLowWaitingListEnableEpoch: uint64(4), + common.MetricAlwaysMergeContextsInEEIEnableEpoch: uint64(4), + common.MetricDynamicESDTEnableEpoch: uint64(4), + common.MetricEGLDInMultiTransferEnableEpoch: 
uint64(4), + common.MetricCryptoOpcodesV2EnableEpoch: uint64(4), + common.MetricMultiESDTNFTTransferAndExecuteByUserEnableEpoch: uint64(4), common.MetricMaxNodesChangeEnableEpoch: []map[string]interface{}{ { diff --git a/storage/constants.go b/storage/constants.go index b78021138c7..9cd37571521 100644 --- a/storage/constants.go +++ b/storage/constants.go @@ -1,14 +1,14 @@ package storage import ( - "github.com/multiversx/mx-chain-storage-go/storageUnit" + "github.com/multiversx/mx-chain-storage-go/common" ) // MaxRetriesToCreateDB represents the maximum number of times to try to create DB if it failed -const MaxRetriesToCreateDB = storageUnit.MaxRetriesToCreateDB +const MaxRetriesToCreateDB = common.MaxRetriesToCreateDB // SleepTimeBetweenCreateDBRetries represents the number of seconds to sleep between DB creates -const SleepTimeBetweenCreateDBRetries = storageUnit.SleepTimeBetweenCreateDBRetries +const SleepTimeBetweenCreateDBRetries = common.SleepTimeBetweenCreateDBRetries // PathShardPlaceholder represents the placeholder for the shard ID in paths const PathShardPlaceholder = "[S]" diff --git a/storage/factory/dbConfigHandler.go b/storage/factory/dbConfigHandler.go index 28ba8b5dcdb..468c42a2ee7 100644 --- a/storage/factory/dbConfigHandler.go +++ b/storage/factory/dbConfigHandler.go @@ -1,6 +1,7 @@ package factory import ( + "errors" "fmt" "os" "path/filepath" @@ -10,35 +11,33 @@ import ( ) const ( - dbConfigFileName = "config.toml" - defaultType = "LvlDBSerial" + dbConfigFileName = "config.toml" + defaultType = "LvlDBSerial" + defaultBatchDelaySeconds = 2 + defaultMaxBatchSize = 100 + defaultMaxOpenFiles = 10 + defaultUseTmpAsFilePath = false +) + +var ( + errInvalidConfiguration = errors.New("invalid configuration") ) type dbConfigHandler struct { - dbType string - batchDelaySeconds int - maxBatchSize int - maxOpenFiles int - shardIDProviderType string - numShards int32 + conf config.DBConfig } // NewDBConfigHandler will create a new db config handler instance func NewDBConfigHandler(config config.DBConfig) *dbConfigHandler { return &dbConfigHandler{ - dbType: config.Type, - batchDelaySeconds: config.BatchDelaySeconds, - maxBatchSize: config.MaxBatchSize, - maxOpenFiles: config.MaxOpenFiles, - shardIDProviderType: config.ShardIDProviderType, - numShards: config.NumShards, + conf: config, } } // GetDBConfig will get the db config based on path func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { dbConfigFromFile := &config.DBConfig{} - err := core.LoadTomlFile(dbConfigFromFile, getPersisterConfigFilePath(path)) + err := readCorrectConfigurationFromToml(dbConfigFromFile, getPersisterConfigFilePath(path)) if err == nil { log.Debug("GetDBConfig: loaded db config from toml config file", "config path", path, @@ -51,9 +50,10 @@ func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { if !empty { dbConfig := &config.DBConfig{ Type: defaultType, - BatchDelaySeconds: dh.batchDelaySeconds, - MaxBatchSize: dh.maxBatchSize, - MaxOpenFiles: dh.maxOpenFiles, + BatchDelaySeconds: dh.conf.BatchDelaySeconds, + MaxBatchSize: dh.conf.MaxBatchSize, + MaxOpenFiles: dh.conf.MaxOpenFiles, + UseTmpAsFilePath: dh.conf.UseTmpAsFilePath, } log.Debug("GetDBConfig: loaded default db config", @@ -63,20 +63,25 @@ func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { return dbConfig, nil } - dbConfig := &config.DBConfig{ - Type: dh.dbType, - BatchDelaySeconds: dh.batchDelaySeconds, - MaxBatchSize: 
dh.maxBatchSize, - MaxOpenFiles: dh.maxOpenFiles, - ShardIDProviderType: dh.shardIDProviderType, - NumShards: dh.numShards, - } - log.Debug("GetDBConfig: loaded db config from main config file", - "configuration", fmt.Sprintf("%+v", dbConfig), + "configuration", fmt.Sprintf("%+v", dh.conf), ) - return dbConfig, nil + return &dh.conf, nil +} + +func readCorrectConfigurationFromToml(dbConfig *config.DBConfig, filePath string) error { + err := core.LoadTomlFile(dbConfig, filePath) + if err != nil { + return err + } + + isInvalidConfig := len(dbConfig.Type) == 0 || dbConfig.MaxBatchSize <= 0 || dbConfig.BatchDelaySeconds <= 0 || dbConfig.MaxOpenFiles <= 0 + if isInvalidConfig { + return errInvalidConfiguration + } + + return nil } // SaveDBConfigToFilePath will save the provided db config to specified path @@ -92,13 +97,6 @@ func (dh *dbConfigHandler) SaveDBConfigToFilePath(path string, dbConfig *config. configFilePath := getPersisterConfigFilePath(path) - loadedDBConfig := &config.DBConfig{} - err = core.LoadTomlFile(loadedDBConfig, configFilePath) - if err == nil { - // config file already exists, no need to save config - return nil - } - err = core.SaveTomlFile(dbConfig, configFilePath) if err != nil { return err diff --git a/storage/factory/dbConfigHandler_test.go b/storage/factory/dbConfigHandler_test.go index 73fbfa55b81..910683d732d 100644 --- a/storage/factory/dbConfigHandler_test.go +++ b/storage/factory/dbConfigHandler_test.go @@ -2,11 +2,13 @@ package factory_test import ( "os" + "path" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage/factory" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -88,6 +90,37 @@ func TestDBConfigHandler_GetDBConfig(t *testing.T) { require.Nil(t, err) require.Equal(t, expectedDBConfig, conf) }) + t.Run("empty config.toml file, load default db config", func(t *testing.T) { + t.Parallel() + + testConfig := createDefaultDBConfig() + testConfig.BatchDelaySeconds = 37 + testConfig.MaxBatchSize = 38 + testConfig.MaxOpenFiles = 39 + testConfig.ShardIDProviderType = "BinarySplit" + testConfig.NumShards = 4 + pf := factory.NewDBConfigHandler(testConfig) + + dirPath := t.TempDir() + + f, _ := os.Create(path.Join(dirPath, factory.DBConfigFileName)) + _ = f.Close() + + expectedDBConfig := &config.DBConfig{ + FilePath: "", + Type: factory.DefaultType, + BatchDelaySeconds: testConfig.BatchDelaySeconds, + MaxBatchSize: testConfig.MaxBatchSize, + MaxOpenFiles: testConfig.MaxOpenFiles, + UseTmpAsFilePath: false, + ShardIDProviderType: "", + NumShards: 0, + } + + conf, err := pf.GetDBConfig(dirPath) + require.Nil(t, err) + require.Equal(t, expectedDBConfig, conf) + }) t.Run("empty dir, load db config from main config", func(t *testing.T) { t.Parallel() @@ -146,22 +179,33 @@ func TestDBConfigHandler_SaveDBConfigToFilePath(t *testing.T) { err := pf.SaveDBConfigToFilePath("no/valid/path", &dbConfig) require.Nil(t, err) }) - - t.Run("config file already present, should not fail", func(t *testing.T) { + t.Run("config file already present, should not fail and should rewrite", func(t *testing.T) { t.Parallel() - dbConfig := createDefaultDBConfig() + dbConfig1 := createDefaultDBConfig() + dbConfig1.MaxOpenFiles = 37 + dbConfig1.Type = "dbconfig1" dirPath := t.TempDir() configPath := factory.GetPersisterConfigFilePath(dirPath) - err := core.SaveTomlFile(dbConfig, configPath) 
+ err := core.SaveTomlFile(dbConfig1, configPath) require.Nil(t, err) - pf := factory.NewDBConfigHandler(dbConfig) - err = pf.SaveDBConfigToFilePath(dirPath, &dbConfig) + pf := factory.NewDBConfigHandler(dbConfig1) + + dbConfig2 := createDefaultDBConfig() + dbConfig2.MaxOpenFiles = 38 + dbConfig2.Type = "dbconfig2" + + err = pf.SaveDBConfigToFilePath(dirPath, &dbConfig2) + require.Nil(t, err) + + loadedDBConfig := &config.DBConfig{} + err = core.LoadTomlFile(loadedDBConfig, path.Join(dirPath, "config.toml")) require.Nil(t, err) - }) + assert.Equal(t, dbConfig2, *loadedDBConfig) + }) t.Run("should work", func(t *testing.T) { t.Parallel() diff --git a/storage/factory/export_test.go b/storage/factory/export_test.go index 23317b7d4cf..854f52e9cc3 100644 --- a/storage/factory/export_test.go +++ b/storage/factory/export_test.go @@ -8,6 +8,9 @@ import ( // DefaultType exports the defaultType const to be used in tests const DefaultType = defaultType +// DBConfigFileName exports the dbConfigFileName const to be used in tests +const DBConfigFileName = dbConfigFileName + // GetPersisterConfigFilePath - func GetPersisterConfigFilePath(path string) string { return getPersisterConfigFilePath(path) @@ -22,3 +25,8 @@ func NewPersisterCreator(config config.DBConfig) *persisterCreator { func (pc *persisterCreator) CreateShardIDProvider() (storage.ShardIDProvider, error) { return pc.createShardIDProvider() } + +// GetTmpFilePath - +func GetTmpFilePath(path string) (string, error) { + return getTmpFilePath(path) +} diff --git a/storage/factory/openStorage.go b/storage/factory/openStorage.go index 80dae5bc39c..0effada6f04 100644 --- a/storage/factory/openStorage.go +++ b/storage/factory/openStorage.go @@ -3,7 +3,6 @@ package factory import ( "fmt" "path/filepath" - "time" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/config" @@ -56,8 +55,7 @@ func (o *openStorageUnits) GetMostRecentStorageUnit(dbConfig config.DBConfig) (s return nil, err } - dbConfigHandler := NewDBConfigHandler(dbConfig) - persisterFactory, err := NewPersisterFactory(dbConfigHandler) + persisterFactory, err := NewPersisterFactory(dbConfig) if err != nil { return nil, err } @@ -74,7 +72,7 @@ func (o *openStorageUnits) GetMostRecentStorageUnit(dbConfig config.DBConfig) (s persisterPath := o.getPersisterPath(pathWithoutShard, mostRecentShard, dbConfig) - persister, err := createDB(persisterFactory, persisterPath) + persister, err := persisterFactory.CreateWithRetries(persisterPath) if err != nil { return nil, err } @@ -112,13 +110,12 @@ func (o *openStorageUnits) OpenDB(dbConfig config.DBConfig, shardID uint32, epoc parentDir := o.latestStorageDataProvider.GetParentDirectory() pathWithoutShard := o.getPathWithoutShard(parentDir, epoch) persisterPath := o.getPersisterPath(pathWithoutShard, fmt.Sprintf("%d", shardID), dbConfig) - dbConfigHandler := NewDBConfigHandler(dbConfig) - persisterFactory, err := NewPersisterFactory(dbConfigHandler) + persisterFactory, err := NewPersisterFactory(dbConfig) if err != nil { return nil, err } - persister, err := createDB(persisterFactory, persisterPath) + persister, err := persisterFactory.CreateWithRetries(persisterPath) if err != nil { return nil, err } @@ -131,21 +128,6 @@ func (o *openStorageUnits) OpenDB(dbConfig config.DBConfig, shardID uint32, epoc return storageunit.NewStorageUnit(lruCache, persister) } -func createDB(persisterFactory *PersisterFactory, persisterPath string) (storage.Persister, error) { - var persister 
storage.Persister - var err error - for i := 0; i < storage.MaxRetriesToCreateDB; i++ { - persister, err = persisterFactory.Create(persisterPath) - if err == nil { - return persister, nil - } - log.Warn("Create Persister failed", "path", persisterPath, "error", err) - //TODO: extract this in a parameter and inject it - time.Sleep(storage.SleepTimeBetweenCreateDBRetries) - } - return nil, err -} - func (o *openStorageUnits) getMostUpToDateDirectory( dbConfig config.DBConfig, pathWithoutShard string, diff --git a/storage/factory/persisterCreator.go b/storage/factory/persisterCreator.go index 1357fc37ae4..0d17287815e 100644 --- a/storage/factory/persisterCreator.go +++ b/storage/factory/persisterCreator.go @@ -5,39 +5,29 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" "github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-storage-go/factory" ) const minNumShards = 2 // persisterCreator is the factory which will handle creating new persisters type persisterCreator struct { - dbType string - batchDelaySeconds int - maxBatchSize int - maxOpenFiles int - shardIDProviderType string - numShards int32 + conf config.DBConfig } func newPersisterCreator(config config.DBConfig) *persisterCreator { return &persisterCreator{ - dbType: config.Type, - batchDelaySeconds: config.BatchDelaySeconds, - maxBatchSize: config.MaxBatchSize, - maxOpenFiles: config.MaxOpenFiles, - shardIDProviderType: config.ShardIDProviderType, - numShards: config.NumShards, + conf: config, } } // Create will create the persister for the provided path -// TODO: refactor to use max tries mechanism func (pc *persisterCreator) Create(path string) (storage.Persister, error) { if len(path) == 0 { return nil, storage.ErrInvalidFilePath } - if pc.numShards < minNumShards { + if pc.conf.NumShards < minNumShards { return pc.CreateBasePersister(path) } @@ -50,23 +40,23 @@ func (pc *persisterCreator) Create(path string) (storage.Persister, error) { // CreateBasePersister will create base the persister for the provided path func (pc *persisterCreator) CreateBasePersister(path string) (storage.Persister, error) { - var dbType = storageunit.DBType(pc.dbType) - switch dbType { - case storageunit.LvlDB: - return database.NewLevelDB(path, pc.batchDelaySeconds, pc.maxBatchSize, pc.maxOpenFiles) - case storageunit.LvlDBSerial: - return database.NewSerialDB(path, pc.batchDelaySeconds, pc.maxBatchSize, pc.maxOpenFiles) - case storageunit.MemoryDB: - return database.NewMemDB(), nil - default: - return nil, storage.ErrNotSupportedDBType + var dbType = storageunit.DBType(pc.conf.Type) + + argsDB := factory.ArgDB{ + DBType: dbType, + Path: path, + BatchDelaySeconds: pc.conf.BatchDelaySeconds, + MaxBatchSize: pc.conf.MaxBatchSize, + MaxOpenFiles: pc.conf.MaxOpenFiles, } + + return storageunit.NewDB(argsDB) } func (pc *persisterCreator) createShardIDProvider() (storage.ShardIDProvider, error) { - switch storageunit.ShardIDProviderType(pc.shardIDProviderType) { + switch storageunit.ShardIDProviderType(pc.conf.ShardIDProviderType) { case storageunit.BinarySplit: - return database.NewShardIDProvider(pc.numShards) + return database.NewShardIDProvider(pc.conf.NumShards) default: return nil, storage.ErrNotSupportedShardIDProviderType } diff --git a/storage/factory/persisterCreator_test.go b/storage/factory/persisterCreator_test.go index a0fdef7e1ef..b1a4cc63796 100644 --- 
a/storage/factory/persisterCreator_test.go +++ b/storage/factory/persisterCreator_test.go @@ -38,6 +38,19 @@ func TestPersisterCreator_Create(t *testing.T) { require.Equal(t, storage.ErrInvalidFilePath, err) }) + t.Run("use tmp as file path", func(t *testing.T) { + t.Parallel() + + conf := createDefaultDBConfig() + conf.UseTmpAsFilePath = true + + pc := factory.NewPersisterCreator(conf) + + p, err := pc.Create("path1") + require.Nil(t, err) + require.NotNil(t, p) + }) + t.Run("should create non sharded persister", func(t *testing.T) { t.Parallel() diff --git a/storage/factory/persisterFactory.go b/storage/factory/persisterFactory.go index a1305ec2184..321ddf59118 100644 --- a/storage/factory/persisterFactory.go +++ b/storage/factory/persisterFactory.go @@ -1,29 +1,51 @@ package factory import ( - "github.com/multiversx/mx-chain-core-go/core/check" + "os" + "path" + "time" + + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/disabled" ) -// PersisterFactory is the factory which will handle creating new databases -type PersisterFactory struct { +// persisterFactory is the factory which will handle creating new databases +type persisterFactory struct { dbConfigHandler storage.DBConfigHandler } -// NewPersisterFactory will return a new instance of a PersisterFactory -func NewPersisterFactory(dbConfigHandler storage.DBConfigHandler) (*PersisterFactory, error) { - if check.IfNil(dbConfigHandler) { - return nil, storage.ErrNilDBConfigHandler - } +// NewPersisterFactory will return a new instance of persister factory +func NewPersisterFactory(config config.DBConfig) (*persisterFactory, error) { + dbConfigHandler := NewDBConfigHandler(config) - return &PersisterFactory{ + return &persisterFactory{ dbConfigHandler: dbConfigHandler, }, nil } +// CreateWithRetries will return a new instance of a DB with a given path +// It will try to create db multiple times +func (pf *persisterFactory) CreateWithRetries(path string) (storage.Persister, error) { + var persister storage.Persister + var err error + + for i := 0; i < storage.MaxRetriesToCreateDB; i++ { + persister, err = pf.Create(path) + if err == nil { + return persister, nil + } + log.Warn("Create Persister failed", "path", path, "error", err) + + // TODO: extract this in a parameter and inject it + time.Sleep(storage.SleepTimeBetweenCreateDBRetries) + } + + return nil, err +} + // Create will return a new instance of a DB with a given path -func (pf *PersisterFactory) Create(path string) (storage.Persister, error) { +func (pf *persisterFactory) Create(path string) (storage.Persister, error) { if len(path) == 0 { return nil, storage.ErrInvalidFilePath } @@ -33,6 +55,15 @@ func (pf *PersisterFactory) Create(path string) (storage.Persister, error) { return nil, err } + if dbConfig.UseTmpAsFilePath { + filePath, err := getTmpFilePath(path) + if err != nil { + return nil, err + } + + path = filePath + } + pc := newPersisterCreator(*dbConfig) persister, err := pc.Create(path) @@ -49,11 +80,16 @@ func (pf *PersisterFactory) Create(path string) (storage.Persister, error) { } // CreateDisabled will return a new disabled persister -func (pf *PersisterFactory) CreateDisabled() storage.Persister { +func (pf *persisterFactory) CreateDisabled() storage.Persister { return disabled.NewErrorDisabledPersister() } +func getTmpFilePath(p string) (string, error) { + _, file := path.Split(p) + return os.MkdirTemp("", file) 
+} + // IsInterfaceNil returns true if there is no value under the interface -func (pf *PersisterFactory) IsInterfaceNil() bool { +func (pf *persisterFactory) IsInterfaceNil() bool { return pf == nil } diff --git a/storage/factory/persisterFactory_test.go b/storage/factory/persisterFactory_test.go index 208542a665b..babf32f660d 100644 --- a/storage/factory/persisterFactory_test.go +++ b/storage/factory/persisterFactory_test.go @@ -3,11 +3,15 @@ package factory_test import ( "fmt" "os" + "path" + "strings" "testing" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-storage-go/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -15,8 +19,7 @@ import ( func TestNewPersisterFactory(t *testing.T) { t.Parallel() - dbConfigHandler := factory.NewDBConfigHandler(createDefaultDBConfig()) - pf, err := factory.NewPersisterFactory(dbConfigHandler) + pf, err := factory.NewPersisterFactory(createDefaultDBConfig()) require.NotNil(t, pf) require.Nil(t, err) } @@ -27,25 +30,84 @@ func TestPersisterFactory_Create(t *testing.T) { t.Run("invalid file path, should fail", func(t *testing.T) { t.Parallel() - dbConfigHandler := factory.NewDBConfigHandler(createDefaultDBConfig()) - pf, _ := factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(createDefaultDBConfig()) p, err := pf.Create("") require.Nil(t, p) require.Equal(t, storage.ErrInvalidFilePath, err) }) + t.Run("with tmp file path, should work", func(t *testing.T) { + t.Parallel() + + conf := createDefaultDBConfig() + conf.UseTmpAsFilePath = true + + pf, _ := factory.NewPersisterFactory(conf) + + dir := t.TempDir() + + p, err := pf.Create(dir) + require.NotNil(t, p) + require.Nil(t, err) + + // config.toml will be created in tmp path, but cannot be easily checked since + // the file path is not created deterministically + + // should not find in the dir created initially. 
+ _, err = os.Stat(dir + "/config.toml") + require.Error(t, err) + }) + t.Run("should work", func(t *testing.T) { t.Parallel() - dbConfigHandler := factory.NewDBConfigHandler(createDefaultDBConfig()) - pf, _ := factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(createDefaultDBConfig()) dir := t.TempDir() p, err := pf.Create(dir) require.NotNil(t, p) require.Nil(t, err) + + // check config.toml file exists + _, err = os.Stat(dir + "/config.toml") + require.Nil(t, err) + }) +} + +func TestPersisterFactory_CreateWithRetries(t *testing.T) { + t.Parallel() + + t.Run("wrong config should error", func(t *testing.T) { + t.Parallel() + + path := "TEST" + dbConfig := createDefaultDBConfig() + dbConfig.Type = "invalid type" + + persisterFactory, err := factory.NewPersisterFactory(dbConfig) + assert.Nil(t, err) + + db, err := persisterFactory.CreateWithRetries(path) + assert.True(t, check.IfNil(db)) + assert.Equal(t, common.ErrNotSupportedDBType, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + path := path.Join(t.TempDir(), "TEST") + dbConfig := createDefaultDBConfig() + dbConfig.FilePath = path + + persisterFactory, err := factory.NewPersisterFactory(dbConfig) + assert.Nil(t, err) + + db, err := persisterFactory.CreateWithRetries(path) + assert.False(t, check.IfNil(db)) + assert.Nil(t, err) + _ = db.Close() }) } @@ -57,8 +119,7 @@ func TestPersisterFactory_Create_ConfigSaveToFilePath(t *testing.T) { dbConfig := createDefaultBasePersisterConfig() dbConfig.Type = string(storageunit.LvlDB) - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - pf, _ := factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(dbConfig) dir := t.TempDir() path := dir + "storer/" @@ -77,8 +138,7 @@ func TestPersisterFactory_Create_ConfigSaveToFilePath(t *testing.T) { dbConfig := createDefaultBasePersisterConfig() dbConfig.Type = string(storageunit.LvlDBSerial) - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - pf, _ := factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(dbConfig) dir := t.TempDir() path := dir + "storer/" @@ -97,8 +157,7 @@ func TestPersisterFactory_Create_ConfigSaveToFilePath(t *testing.T) { dbConfig := createDefaultBasePersisterConfig() dbConfig.Type = string(storageunit.MemoryDB) - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - pf, _ := factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(dbConfig) dir := t.TempDir() path := dir + "storer/" @@ -117,8 +176,7 @@ func TestPersisterFactory_Create_ConfigSaveToFilePath(t *testing.T) { dbConfig := createDefaultBasePersisterConfig() dbConfig.Type = string(storageunit.MemoryDB) - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - pf, _ := factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(dbConfig) dir := t.TempDir() path := dir + "storer/" @@ -135,8 +193,7 @@ func TestPersisterFactory_Create_ConfigSaveToFilePath(t *testing.T) { func TestPersisterFactory_CreateDisabled(t *testing.T) { t.Parallel() - dbConfigHandler := factory.NewDBConfigHandler(createDefaultDBConfig()) - factoryInstance, err := factory.NewPersisterFactory(dbConfigHandler) + factoryInstance, err := factory.NewPersisterFactory(createDefaultDBConfig()) require.Nil(t, err) persisterInstance := factoryInstance.CreateDisabled() @@ -147,10 +204,28 @@ func TestPersisterFactory_CreateDisabled(t *testing.T) { func TestPersisterFactory_IsInterfaceNil(t *testing.T) { t.Parallel() - var pf 
*factory.PersisterFactory - require.True(t, pf.IsInterfaceNil()) - - dbConfigHandler := factory.NewDBConfigHandler(createDefaultDBConfig()) - pf, _ = factory.NewPersisterFactory(dbConfigHandler) + pf, _ := factory.NewPersisterFactory(createDefaultDBConfig()) require.False(t, pf.IsInterfaceNil()) } + +func TestGetTmpFilePath(t *testing.T) { + t.Parallel() + + pathSeparator := "/" + + tmpDir := os.TempDir() + tmpBasePath := path.Join(tmpDir, pathSeparator) + + tmpPath, err := factory.GetTmpFilePath("aaaa/bbbb/cccc") + require.Nil(t, err) + require.True(t, strings.Contains(tmpPath, path.Join(tmpBasePath, "cccc"))) + + tmpPath, _ = factory.GetTmpFilePath("aaaa") + require.True(t, strings.Contains(tmpPath, path.Join(tmpBasePath, "aaaa"))) + + tmpPath, _ = factory.GetTmpFilePath("") + require.True(t, strings.Contains(tmpPath, path.Join(tmpBasePath, ""))) + + tmpPath, _ = factory.GetTmpFilePath("/") + require.True(t, strings.Contains(tmpPath, path.Join(tmpBasePath, ""))) +} diff --git a/storage/factory/storageServiceFactory.go b/storage/factory/storageServiceFactory.go index f316bfec7d7..c153e6b2cc8 100644 --- a/storage/factory/storageServiceFactory.go +++ b/storage/factory/storageServiceFactory.go @@ -27,6 +27,7 @@ var log = logger.GetOrCreate("storage/factory") const ( minimumNumberOfActivePersisters = 1 minimumNumberOfEpochsToKeep = 2 + emptyDBPathSuffix = "" ) // StorageServiceType defines the type of StorageService @@ -134,11 +135,8 @@ func checkArgs(args StorageServiceFactoryArgs) error { return nil } -// TODO: refactor this function, split it into multiple ones -func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( +func (psf *StorageServiceFactory) createAndAddTxStorageUnits( store dataRetriever.StorageService, - customDatabaseRemover storage.CustomDatabaseRemoverHandler, - shardID string, ) error { disabledCustomDatabaseRemover := disabled.NewDisabledCustomDatabaseRemover() @@ -182,6 +180,21 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( } store.AddStorer(dataRetriever.ReceiptsUnit, receiptsUnit) + return nil +} + +func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( + store dataRetriever.StorageService, + customDatabaseRemover storage.CustomDatabaseRemoverHandler, + shardID string, +) error { + disabledCustomDatabaseRemover := disabled.NewDisabledCustomDatabaseRemover() + + err := psf.createAndAddTxStorageUnits(store) + if err != nil { + return err + } + scheduledSCRsUnitArgs, err := psf.createPruningStorerArgs(psf.generalConfig.ScheduledSCRsStorage, disabledCustomDatabaseRemover) if err != nil { return err @@ -222,22 +235,7 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( } store.AddStorer(dataRetriever.MetaBlockUnit, metaBlockUnit) - // metaHdrHashNonce is static - metaHdrHashNonceUnitConfig := GetDBFromConfig(psf.generalConfig.MetaHdrNonceHashStorage.DB) - dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.MetaHdrNonceHashStorage.DB.FilePath) - metaHdrHashNonceUnitConfig.FilePath = dbPath - - dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.MetaHdrNonceHashStorage.DB) - metaHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return err - } - - metaHdrHashNonceUnit, err := storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(psf.generalConfig.MetaHdrNonceHashStorage.Cache), - metaHdrHashNonceUnitConfig, - metaHdrHashNoncePersisterCreator, - ) + metaHdrHashNonceUnit, err := psf.createStaticStorageUnit(psf.generalConfig.MetaHdrNonceHashStorage, 
shardID, emptyDBPathSuffix) if err != nil { return fmt.Errorf("%w for MetaHdrNonceHashStorage", err) } @@ -259,22 +257,8 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( } store.AddStorer(dataRetriever.UserAccountsUnit, userAccountsUnit) - statusMetricsDbConfig := GetDBFromConfig(psf.generalConfig.StatusMetricsStorage.DB) shardId := core.GetShardIDString(psf.shardCoordinator.SelfId()) - dbPath = psf.pathManager.PathForStatic(shardId, psf.generalConfig.StatusMetricsStorage.DB.FilePath) - statusMetricsDbConfig.FilePath = dbPath - - dbConfigHandlerInstance = NewDBConfigHandler(psf.generalConfig.StatusMetricsStorage.DB) - statusMetricsPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return err - } - - statusMetricsStorageUnit, err := storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(psf.generalConfig.StatusMetricsStorage.Cache), - statusMetricsDbConfig, - statusMetricsPersisterCreator, - ) + statusMetricsStorageUnit, err := psf.createStaticStorageUnit(psf.generalConfig.StatusMetricsStorage, shardId, emptyDBPathSuffix) if err != nil { return fmt.Errorf("%w for StatusMetricsStorage", err) } @@ -289,6 +273,27 @@ func (psf *StorageServiceFactory) createAndAddBaseStorageUnits( return nil } +func (psf *StorageServiceFactory) createStaticStorageUnit( + storageConf config.StorageConfig, + shardID string, + dbPathSuffix string, +) (*storageunit.Unit, error) { + storageUnitDBConf := GetDBFromConfig(storageConf.DB) + dbPath := psf.pathManager.PathForStatic(shardID, storageConf.DB.FilePath) + dbPathSuffix + storageUnitDBConf.FilePath = dbPath + + persisterCreator, err := NewPersisterFactory(storageConf.DB) + if err != nil { + return nil, err + } + + return storageunit.NewStorageUnitFromConf( + GetCacherFromConfig(storageConf.Cache), + storageUnitDBConf, + persisterCreator, + ) +} + // CreateForShard will return the storage service which contains all storers needed for a shard func (psf *StorageServiceFactory) CreateForShard() (dataRetriever.StorageService, error) { // TODO: if there will be a differentiation between the creation or opening of a DB, the DBs could be destroyed on a defer @@ -301,23 +306,8 @@ func (psf *StorageServiceFactory) CreateForShard() (dataRetriever.StorageService } shardID := core.GetShardIDString(psf.shardCoordinator.SelfId()) - - // shardHdrHashNonce storer is static - shardHdrHashNonceConfig := GetDBFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.DB) - dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.ShardHdrNonceHashStorage.DB.FilePath) + shardID - shardHdrHashNonceConfig.FilePath = dbPath - - dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) - shardHdrHashNoncePersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return nil, err - } - - shardHdrHashNonceUnit, err := storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.Cache), - shardHdrHashNonceConfig, - shardHdrHashNoncePersisterCreator, - ) + dbPathSuffix := shardID + shardHdrHashNonceUnit, err := psf.createStaticStorageUnit(psf.generalConfig.ShardHdrNonceHashStorage, shardID, dbPathSuffix) if err != nil { return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage", err) } @@ -382,24 +372,10 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, shardHdrHashNonceUnits := make([]*storageunit.Unit, psf.shardCoordinator.NumberOfShards()) for i := uint32(0); i < 
psf.shardCoordinator.NumberOfShards(); i++ { - shardHdrHashNonceConfig := GetDBFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.DB) shardID = core.GetShardIDString(core.MetachainShardId) - dbPath := psf.pathManager.PathForStatic(shardID, psf.generalConfig.ShardHdrNonceHashStorage.DB.FilePath) + fmt.Sprintf("%d", i) - shardHdrHashNonceConfig.FilePath = dbPath - - dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.ShardHdrNonceHashStorage.DB) - shardHdrHashNoncePersisterCreator, errLoop := NewPersisterFactory(dbConfigHandlerInstance) - if errLoop != nil { - return nil, errLoop - } - - shardHdrHashNonceUnits[i], errLoop = storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(psf.generalConfig.ShardHdrNonceHashStorage.Cache), - shardHdrHashNonceConfig, - shardHdrHashNoncePersisterCreator, - ) - if errLoop != nil { - return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage on shard %d", errLoop, i) + shardHdrHashNonceUnits[i], err = psf.createStaticStorageUnit(psf.generalConfig.ShardHdrNonceHashStorage, shardID, fmt.Sprintf("%d", i)) + if err != nil { + return nil, fmt.Errorf("%w for ShardHdrNonceHashStorage on shard %d", err, i) } } @@ -527,69 +503,21 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri chainStorer.AddStorer(dataRetriever.MiniblocksMetadataUnit, miniblocksMetadataPruningStorer) - // Create the miniblocksHashByTxHash (STATIC) storer - miniblockHashByTxHashConfig := psf.generalConfig.DbLookupExtensions.MiniblockHashByTxHashStorageConfig - miniblockHashByTxHashDbConfig := GetDBFromConfig(miniblockHashByTxHashConfig.DB) - miniblockHashByTxHashDbConfig.FilePath = psf.pathManager.PathForStatic(shardID, miniblockHashByTxHashConfig.DB.FilePath) - miniblockHashByTxHashCacherConfig := GetCacherFromConfig(miniblockHashByTxHashConfig.Cache) - - dbConfigHandlerInstance := NewDBConfigHandler(miniblockHashByTxHashConfig.DB) - miniblockHashByTxHashPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return err - } - - miniblockHashByTxHashUnit, err := storageunit.NewStorageUnitFromConf( - miniblockHashByTxHashCacherConfig, - miniblockHashByTxHashDbConfig, - miniblockHashByTxHashPersisterCreator, - ) + miniblockHashByTxHashUnit, err := psf.createStaticStorageUnit(psf.generalConfig.DbLookupExtensions.MiniblockHashByTxHashStorageConfig, shardID, emptyDBPathSuffix) if err != nil { return fmt.Errorf("%w for DbLookupExtensions.MiniblockHashByTxHashStorageConfig", err) } chainStorer.AddStorer(dataRetriever.MiniblockHashByTxHashUnit, miniblockHashByTxHashUnit) - // Create the blockHashByRound (STATIC) storer - blockHashByRoundConfig := psf.generalConfig.DbLookupExtensions.RoundHashStorageConfig - blockHashByRoundDBConfig := GetDBFromConfig(blockHashByRoundConfig.DB) - blockHashByRoundDBConfig.FilePath = psf.pathManager.PathForStatic(shardID, blockHashByRoundConfig.DB.FilePath) - blockHashByRoundCacherConfig := GetCacherFromConfig(blockHashByRoundConfig.Cache) - - dbConfigHandlerInstance = NewDBConfigHandler(blockHashByRoundConfig.DB) - blockHashByRoundPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return err - } - - blockHashByRoundUnit, err := storageunit.NewStorageUnitFromConf( - blockHashByRoundCacherConfig, - blockHashByRoundDBConfig, - blockHashByRoundPersisterCreator, - ) + blockHashByRoundUnit, err := psf.createStaticStorageUnit(psf.generalConfig.DbLookupExtensions.RoundHashStorageConfig, shardID, emptyDBPathSuffix) if err != nil { return fmt.Errorf("%w for 
DbLookupExtensions.RoundHashStorageConfig", err) } chainStorer.AddStorer(dataRetriever.RoundHdrHashDataUnit, blockHashByRoundUnit) - // Create the epochByHash (STATIC) storer - epochByHashConfig := psf.generalConfig.DbLookupExtensions.EpochByHashStorageConfig - epochByHashDbConfig := GetDBFromConfig(epochByHashConfig.DB) - epochByHashDbConfig.FilePath = psf.pathManager.PathForStatic(shardID, epochByHashConfig.DB.FilePath) - epochByHashCacherConfig := GetCacherFromConfig(epochByHashConfig.Cache) - - dbConfigHandlerInstance = NewDBConfigHandler(epochByHashConfig.DB) - epochByHashPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return err - } - - epochByHashUnit, err := storageunit.NewStorageUnitFromConf( - epochByHashCacherConfig, - epochByHashDbConfig, - epochByHashPersisterCreator, - ) + epochByHashUnit, err := psf.createStaticStorageUnit(psf.generalConfig.DbLookupExtensions.EpochByHashStorageConfig, shardID, emptyDBPathSuffix) if err != nil { return fmt.Errorf("%w for DbLookupExtensions.EpochByHashStorageConfig", err) } @@ -600,7 +528,7 @@ func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetri } func (psf *StorageServiceFactory) setUpEsdtSuppliesStorer(chainStorer *dataRetriever.ChainStorer, shardIDStr string) error { - esdtSuppliesUnit, err := psf.createEsdtSuppliesUnit(shardIDStr) + esdtSuppliesUnit, err := psf.createStaticStorageUnit(psf.generalConfig.DbLookupExtensions.ESDTSuppliesStorageConfig, shardIDStr, emptyDBPathSuffix) if err != nil { return fmt.Errorf("%w for DbLookupExtensions.ESDTSuppliesStorageConfig", err) } @@ -613,7 +541,7 @@ func (psf *StorageServiceFactory) setUpEsdtSuppliesStorer(chainStorer *dataRetri } time.Sleep(time.Second) // making sure the unit was properly closed and destroyed - esdtSuppliesUnit, err = psf.createEsdtSuppliesUnit(shardIDStr) + esdtSuppliesUnit, err = psf.createStaticStorageUnit(psf.generalConfig.DbLookupExtensions.ESDTSuppliesStorageConfig, shardIDStr, emptyDBPathSuffix) if err != nil { return err } @@ -623,23 +551,6 @@ func (psf *StorageServiceFactory) setUpEsdtSuppliesStorer(chainStorer *dataRetri return nil } -func (psf *StorageServiceFactory) createEsdtSuppliesUnit(shardIDStr string) (storage.Storer, error) { - esdtSuppliesConfig := psf.generalConfig.DbLookupExtensions.ESDTSuppliesStorageConfig - esdtSuppliesDbConfig := GetDBFromConfig(esdtSuppliesConfig.DB) - esdtSuppliesDbConfig.FilePath = psf.pathManager.PathForStatic(shardIDStr, esdtSuppliesConfig.DB.FilePath) - esdtSuppliesCacherConfig := GetCacherFromConfig(esdtSuppliesConfig.Cache) - - dbConfigHandlerInstance := NewDBConfigHandler(esdtSuppliesConfig.DB) - esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return nil, err - } - - return storageunit.NewStorageUnitFromConf( - esdtSuppliesCacherConfig, esdtSuppliesDbConfig, - esdtSuppliesPersisterCreator) -} - func (psf *StorageServiceFactory) createPruningStorerArgs( storageConfig config.StorageConfig, customDatabaseRemover storage.CustomDatabaseRemoverHandler, @@ -655,8 +566,7 @@ func (psf *StorageServiceFactory) createPruningStorerArgs( NumOfActivePersisters: numOfActivePersisters, } - dbConfigHandlerInstance := NewDBConfigHandler(storageConfig.DB) - persisterFactory, err := NewPersisterFactory(dbConfigHandlerInstance) + persisterFactory, err := NewPersisterFactory(storageConfig.DB) if err != nil { return pruning.StorerArgs{}, err } @@ -687,22 +597,8 @@ func (psf *StorageServiceFactory) 
createTrieEpochRootHashStorerIfNeeded() (stora return storageunit.NewNilStorer(), nil } - trieEpochRootHashDbConfig := GetDBFromConfig(psf.generalConfig.TrieEpochRootHashStorage.DB) shardId := core.GetShardIDString(psf.shardCoordinator.SelfId()) - dbPath := psf.pathManager.PathForStatic(shardId, psf.generalConfig.TrieEpochRootHashStorage.DB.FilePath) - trieEpochRootHashDbConfig.FilePath = dbPath - - dbConfigHandlerInstance := NewDBConfigHandler(psf.generalConfig.TrieEpochRootHashStorage.DB) - esdtSuppliesPersisterCreator, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return nil, err - } - - trieEpochRootHashStorageUnit, err := storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(psf.generalConfig.TrieEpochRootHashStorage.Cache), - trieEpochRootHashDbConfig, - esdtSuppliesPersisterCreator, - ) + trieEpochRootHashStorageUnit, err := psf.createStaticStorageUnit(psf.generalConfig.TrieEpochRootHashStorage, shardId, emptyDBPathSuffix) if err != nil { return nil, fmt.Errorf("%w for TrieEpochRootHashStorage", err) } @@ -713,21 +609,8 @@ func (psf *StorageServiceFactory) createTrieEpochRootHashStorerIfNeeded() (stora func (psf *StorageServiceFactory) createTriePersister( storageConfig config.StorageConfig, ) (storage.Storer, error) { - trieDBConfig := GetDBFromConfig(storageConfig.DB) shardID := core.GetShardIDString(psf.shardCoordinator.SelfId()) - dbPath := psf.pathManager.PathForStatic(shardID, storageConfig.DB.FilePath) - trieDBConfig.FilePath = dbPath - - dbConfigHandlerInstance := NewDBConfigHandler(storageConfig.DB) - persisterFactory, err := NewPersisterFactory(dbConfigHandlerInstance) - if err != nil { - return nil, err - } - - return storageunit.NewStorageUnitFromConf( - GetCacherFromConfig(storageConfig.Cache), - trieDBConfig, - persisterFactory) + return psf.createStaticStorageUnit(storageConfig, shardID, emptyDBPathSuffix) } func (psf *StorageServiceFactory) createTriePruningPersister(arg pruning.StorerArgs) (storage.Storer, error) { diff --git a/storage/interface.go b/storage/interface.go index 328eb86c4ed..543d5d04f5b 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -211,13 +211,14 @@ type ManagedPeersHolder interface { // PersisterFactoryHandler defines the behaviour of a component which is able to create persisters type PersisterFactoryHandler interface { Create(path string) (Persister, error) + CreateWithRetries(path string) (Persister, error) IsInterfaceNil() bool } // StateStatsHandler defines the behaviour needed to handler storage statistics type StateStatsHandler interface { - IncrCache() - IncrSnapshotCache() - IncrPersister(epoch uint32) - IncrSnapshotPersister(epoch uint32) + IncrementCache() + IncrementSnapshotCache() + IncrementPersister(epoch uint32) + IncrementSnapshotPersister(epoch uint32) } diff --git a/storage/latestData/latestDataProvider.go b/storage/latestData/latestDataProvider.go index df6ea7e2418..2b894627de3 100644 --- a/storage/latestData/latestDataProvider.go +++ b/storage/latestData/latestDataProvider.go @@ -132,8 +132,7 @@ func (ldp *latestDataProvider) getEpochDirs() ([]string, error) { } func (ldp *latestDataProvider) getLastEpochAndRoundFromStorage(parentDir string, lastEpoch uint32) (storage.LatestDataFromStorage, error) { - dbConfigHandler := factory.NewDBConfigHandler(ldp.generalConfig.BootstrapStorage.DB) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(ldp.generalConfig.BootstrapStorage.DB) if err != nil { return 
storage.LatestDataFromStorage{}, err } diff --git a/storage/pruning/fullHistoryPruningStorer_test.go b/storage/pruning/fullHistoryPruningStorer_test.go index c83fc5fae34..0e0d43877e8 100644 --- a/storage/pruning/fullHistoryPruningStorer_test.go +++ b/storage/pruning/fullHistoryPruningStorer_test.go @@ -294,16 +294,13 @@ func TestFullHistoryPruningStorer_ConcurrentOperations(t *testing.T) { fmt.Println(testDir) args := getDefaultArgs() - dbConfigHandler := factory.NewDBConfigHandler( - config.DBConfig{ - FilePath: filepath.Join(testDir, dbName), - Type: "LvlDBSerial", - MaxBatchSize: 100, - MaxOpenFiles: 10, - BatchDelaySeconds: 2, - }, - ) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(config.DBConfig{ + FilePath: filepath.Join(testDir, dbName), + Type: "LvlDBSerial", + MaxBatchSize: 100, + MaxOpenFiles: 10, + BatchDelaySeconds: 2, + }) require.Nil(t, err) args.PersisterFactory = persisterFactory diff --git a/storage/pruning/pruningStorer.go b/storage/pruning/pruningStorer.go index f90f1c75aaa..2007454a7c8 100644 --- a/storage/pruning/pruningStorer.go +++ b/storage/pruning/pruningStorer.go @@ -434,7 +434,7 @@ func (ps *PruningStorer) createAndInitPersister(pd *persisterData) (storage.Pers func (ps *PruningStorer) Get(key []byte) ([]byte, error) { v, ok := ps.cacher.Get(key) if ok { - ps.stateStatsHandler.IncrCache() + ps.stateStatsHandler.IncrementCache() return v.([]byte), nil } @@ -457,7 +457,7 @@ func (ps *PruningStorer) Get(key []byte) ([]byte, error) { // if found in persistence unit, add it to cache and return _ = ps.cacher.Put(key, val, len(val)) - ps.stateStatsHandler.IncrPersister(ps.activePersisters[idx].epoch) + ps.stateStatsHandler.IncrementPersister(ps.activePersisters[idx].epoch) return val, nil } diff --git a/storage/pruning/pruningStorer_test.go b/storage/pruning/pruningStorer_test.go index 29c3765e2d8..248cc53cda2 100644 --- a/storage/pruning/pruningStorer_test.go +++ b/storage/pruning/pruningStorer_test.go @@ -1053,16 +1053,13 @@ func TestPruningStorer_ConcurrentOperations(t *testing.T) { fmt.Println(testDir) args := getDefaultArgs() - dbConfigHandler := factory.NewDBConfigHandler( - config.DBConfig{ - FilePath: filepath.Join(testDir, dbName), - Type: "LvlDBSerial", - MaxBatchSize: 100, - MaxOpenFiles: 10, - BatchDelaySeconds: 2, - }, - ) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(config.DBConfig{ + FilePath: filepath.Join(testDir, dbName), + Type: "LvlDBSerial", + MaxBatchSize: 100, + MaxOpenFiles: 10, + BatchDelaySeconds: 2, + }) require.Nil(t, err) args.PersisterFactory = persisterFactory diff --git a/storage/pruning/triePruningStorer.go b/storage/pruning/triePruningStorer.go index 1eb290023c6..e013820db65 100644 --- a/storage/pruning/triePruningStorer.go +++ b/storage/pruning/triePruningStorer.go @@ -95,7 +95,7 @@ func (ps *triePruningStorer) PutInEpochWithoutCache(key []byte, data []byte, epo func (ps *triePruningStorer) GetFromOldEpochsWithoutAddingToCache(key []byte) ([]byte, core.OptionalUint32, error) { v, ok := ps.cacher.Get(key) if ok && !bytes.Equal([]byte(common.ActiveDBKey), key) { - ps.stateStatsHandler.IncrSnapshotCache() + ps.stateStatsHandler.IncrementSnapshotCache() return v.([]byte), core.OptionalUint32{}, nil } @@ -118,7 +118,7 @@ func (ps *triePruningStorer) GetFromOldEpochsWithoutAddingToCache(key []byte) ([ HasValue: true, } - ps.stateStatsHandler.IncrSnapshotPersister(epoch.Value) + 
ps.stateStatsHandler.IncrementSnapshotPersister(epoch.Value) return val, epoch, nil } diff --git a/storage/pruning/triePruningStorer_test.go b/storage/pruning/triePruningStorer_test.go index 4d9a7c83227..28dc5c93f8e 100644 --- a/storage/pruning/triePruningStorer_test.go +++ b/storage/pruning/triePruningStorer_test.go @@ -76,6 +76,31 @@ func TestTriePruningStorer_GetFromOldEpochsWithoutCacheSearchesOnlyOldEpochsAndR assert.True(t, strings.Contains(err.Error(), "not found")) } +func TestTriePruningStorer_GetFromOldEpochsWithCache(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + ps, _ := pruning.NewTriePruningStorer(args) + cacher := testscommon.NewCacherMock() + ps.SetCacher(cacher) + + testKey1 := []byte("key1") + testVal1 := []byte("value1") + + err := ps.PutInEpoch(testKey1, testVal1, 0) + assert.Nil(t, err) + + err = ps.ChangeEpochSimple(1) + assert.Nil(t, err) + ps.SetEpochForPutOperation(1) + + res, epoch, err := ps.GetFromOldEpochsWithoutAddingToCache(testKey1) + assert.Equal(t, testVal1, res) + assert.Nil(t, err) + assert.False(t, epoch.HasValue) + assert.Equal(t, uint32(0), epoch.Value) +} + func TestTriePruningStorer_GetFromOldEpochsWithoutCacheLessActivePersisters(t *testing.T) { t.Parallel() diff --git a/storage/storageunit/constants.go b/storage/storageunit/constants.go index 0e128af8123..022715dbcb7 100644 --- a/storage/storageunit/constants.go +++ b/storage/storageunit/constants.go @@ -1,25 +1,27 @@ package storageunit -import "github.com/multiversx/mx-chain-storage-go/storageUnit" +import ( + "github.com/multiversx/mx-chain-storage-go/common" +) const ( // LRUCache defines a cache identifier with least-recently-used eviction mechanism - LRUCache = storageUnit.LRUCache + LRUCache = common.LRUCache // SizeLRUCache defines a cache identifier with least-recently-used eviction mechanism and fixed size in bytes - SizeLRUCache = storageUnit.SizeLRUCache + SizeLRUCache = common.SizeLRUCache ) // DB types that are currently supported const ( // LvlDB represents a levelDB storage identifier - LvlDB = storageUnit.LvlDB + LvlDB = common.LvlDB // LvlDBSerial represents a levelDB storage with serialized operations identifier - LvlDBSerial = storageUnit.LvlDBSerial + LvlDBSerial = common.LvlDBSerial // MemoryDB represents an in memory storage identifier - MemoryDB = storageUnit.MemoryDB + MemoryDB = common.MemoryDB ) // Shard id provider types that are currently supported const ( - BinarySplit = storageUnit.BinarySplit + BinarySplit = common.BinarySplit ) diff --git a/storage/storageunit/storageunit.go b/storage/storageunit/storageunit.go index 4e1605efaa7..c1944777920 100644 --- a/storage/storageunit/storageunit.go +++ b/storage/storageunit/storageunit.go @@ -3,6 +3,8 @@ package storageunit import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-storage-go/common" + "github.com/multiversx/mx-chain-storage-go/factory" "github.com/multiversx/mx-chain-storage-go/storageCacherAdapter" "github.com/multiversx/mx-chain-storage-go/storageUnit" ) @@ -12,25 +14,25 @@ import ( type Unit = storageUnit.Unit // CacheConfig holds the configurable elements of a cache -type CacheConfig = storageUnit.CacheConfig - -// ArgDB is a structure that is used to create a new storage.Persister implementation -type ArgDB = storageUnit.ArgDB +type CacheConfig = common.CacheConfig // DBConfig 
holds the configurable elements of a database -type DBConfig = storageUnit.DBConfig +type DBConfig = common.DBConfig // NilStorer resembles a disabled implementation of the Storer interface type NilStorer = storageUnit.NilStorer // CacheType represents the type of the supported caches -type CacheType = storageUnit.CacheType +type CacheType = common.CacheType // DBType represents the type of the supported databases -type DBType = storageUnit.DBType +type DBType = common.DBType // ShardIDProviderType represents the type of the supported shard id providers -type ShardIDProviderType = storageUnit.ShardIDProviderType +type ShardIDProviderType = common.ShardIDProviderType + +// ArgDB is a structure that is used to create a new storage.Persister implementation +type ArgDB = factory.ArgDB // NewStorageUnit is the constructor for the storage unit, creating a new storage unit // from the given cacher and persister. @@ -40,17 +42,31 @@ func NewStorageUnit(c storage.Cacher, p storage.Persister) (*Unit, error) { // NewCache creates a new cache from a cache config func NewCache(config CacheConfig) (storage.Cacher, error) { - return storageUnit.NewCache(config) + return factory.NewCache(config) } // NewDB creates a new database from database config -func NewDB(persisterFactory storage.PersisterFactoryHandler, path string) (storage.Persister, error) { - return storageUnit.NewDB(persisterFactory, path) +func NewDB(args ArgDB) (storage.Persister, error) { + return factory.NewDB(args) } // NewStorageUnitFromConf creates a new storage unit from a storage unit config func NewStorageUnitFromConf(cacheConf CacheConfig, dbConf DBConfig, persisterFactory storage.PersisterFactoryHandler) (*Unit, error) { - return storageUnit.NewStorageUnitFromConf(cacheConf, dbConf, persisterFactory) + if dbConf.MaxBatchSize > int(cacheConf.Capacity) { + return nil, common.ErrCacheSizeIsLowerThanBatchSize + } + + cache, err := NewCache(cacheConf) + if err != nil { + return nil, err + } + + db, err := persisterFactory.CreateWithRetries(dbConf.FilePath) + if err != nil { + return nil, err + } + + return NewStorageUnit(cache, db) } // NewNilStorer will return a nil storer diff --git a/storage/storageunit/storageunit_test.go b/storage/storageunit/storageunit_test.go index 34affcb569f..da4aea63b33 100644 --- a/storage/storageunit/storageunit_test.go +++ b/storage/storageunit/storageunit_test.go @@ -72,52 +72,6 @@ func TestNewCache(t *testing.T) { }) } -func TestNewDB(t *testing.T) { - t.Parallel() - - t.Run("wrong config should error", func(t *testing.T) { - t.Parallel() - - path := "TEST" - dbConfig := config.DBConfig{ - FilePath: path, - Type: "invalid type", - BatchDelaySeconds: 5, - MaxBatchSize: 10, - MaxOpenFiles: 10, - } - - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) - assert.Nil(t, err) - - db, err := storageunit.NewDB(persisterFactory, path) - assert.True(t, check.IfNil(db)) - assert.Equal(t, common.ErrNotSupportedDBType, err) - }) - t.Run("should work", func(t *testing.T) { - t.Parallel() - - path := path.Join(t.TempDir(), "TEST") - dbConfig := config.DBConfig{ - FilePath: path, - Type: "LvlDBSerial", - BatchDelaySeconds: 5, - MaxBatchSize: 10, - MaxOpenFiles: 10, - } - - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) - assert.Nil(t, err) - - db, err := storageunit.NewDB(persisterFactory, path) - assert.False(t, check.IfNil(db)) - assert.Nil(t, err) - _ = 
db.Close() - }) -} - func TestNewStorageUnitFromConf(t *testing.T) { t.Parallel() @@ -144,8 +98,7 @@ func TestNewStorageUnitFromConf(t *testing.T) { MaxBatchSize: dbConfig.MaxBatchSize, MaxOpenFiles: dbConfig.MaxOpenFiles, } - dbConfigHandler := factory.NewDBConfigHandler(dbConf) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(dbConf) assert.Nil(t, err) unit, err := storageunit.NewStorageUnitFromConf(cacheConfig, dbConfig, persisterFactory) @@ -166,8 +119,7 @@ func TestNewStorageUnitFromConf(t *testing.T) { MaxBatchSize: dbConfig.MaxBatchSize, MaxOpenFiles: dbConfig.MaxOpenFiles, } - dbConfigHandler := factory.NewDBConfigHandler(dbConf) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(dbConf) assert.Nil(t, err) unit, err := storageunit.NewStorageUnitFromConf(cacheConfig, dbConfig, persisterFactory) diff --git a/epochStart/mock/argumentsParserMock.go b/testscommon/argumentsParserMock.go similarity index 98% rename from epochStart/mock/argumentsParserMock.go rename to testscommon/argumentsParserMock.go index 02ce8f408ae..b23b66b682b 100644 --- a/epochStart/mock/argumentsParserMock.go +++ b/testscommon/argumentsParserMock.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( vmcommon "github.com/multiversx/mx-chain-vm-common-go" diff --git a/testscommon/bootstrapMocks/bootstrapParamsStub.go b/testscommon/bootstrapMocks/bootstrapParamsStub.go index d62f2d72b61..56d0b6219bd 100644 --- a/testscommon/bootstrapMocks/bootstrapParamsStub.go +++ b/testscommon/bootstrapMocks/bootstrapParamsStub.go @@ -7,7 +7,7 @@ type BootstrapParamsHandlerMock struct { EpochCalled func() uint32 SelfShardIDCalled func() uint32 NumOfShardsCalled func() uint32 - NodesConfigCalled func() *nodesCoordinator.NodesCoordinatorRegistry + NodesConfigCalled func() nodesCoordinator.NodesCoordinatorRegistryHandler } // Epoch - @@ -36,7 +36,7 @@ func (bphm *BootstrapParamsHandlerMock) NumOfShards() uint32 { } // NodesConfig - -func (bphm *BootstrapParamsHandlerMock) NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry { +func (bphm *BootstrapParamsHandlerMock) NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler { if bphm.NodesConfigCalled != nil { return bphm.NodesConfigCalled() } diff --git a/testscommon/builtInCostHandlerStub.go b/testscommon/builtInCostHandlerStub.go deleted file mode 100644 index 046cc45ac2b..00000000000 --- a/testscommon/builtInCostHandlerStub.go +++ /dev/null @@ -1,34 +0,0 @@ -package testscommon - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { - ComputeBuiltInCostCalled func(tx data.TransactionWithFeeHandler) uint64 - IsBuiltInFuncCallCalled func(tx data.TransactionWithFeeHandler) bool -} - -// ComputeBuiltInCost - -func (stub *BuiltInCostHandlerStub) ComputeBuiltInCost(tx data.TransactionWithFeeHandler) uint64 { - if stub.ComputeBuiltInCostCalled != nil { - return stub.ComputeBuiltInCostCalled(tx) - } - - return 1 -} - -// IsBuiltInFuncCall - -func (stub *BuiltInCostHandlerStub) IsBuiltInFuncCall(tx data.TransactionWithFeeHandler) bool { - if stub.IsBuiltInFuncCallCalled != nil { - return stub.IsBuiltInFuncCallCalled(tx) - } - - return false -} - -// IsInterfaceNil returns true if underlying object is nil -func (stub *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return stub == nil -} diff --git 
a/testscommon/chainSimulator/chainSimulatorMock.go b/testscommon/chainSimulator/chainSimulatorMock.go new file mode 100644 index 00000000000..07db474a07e --- /dev/null +++ b/testscommon/chainSimulator/chainSimulatorMock.go @@ -0,0 +1,31 @@ +package chainSimulator + +import "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + +// ChainSimulatorMock - +type ChainSimulatorMock struct { + GenerateBlocksCalled func(numOfBlocks int) error + GetNodeHandlerCalled func(shardID uint32) process.NodeHandler +} + +// GenerateBlocks - +func (mock *ChainSimulatorMock) GenerateBlocks(numOfBlocks int) error { + if mock.GenerateBlocksCalled != nil { + return mock.GenerateBlocksCalled(numOfBlocks) + } + + return nil +} + +// GetNodeHandler - +func (mock *ChainSimulatorMock) GetNodeHandler(shardID uint32) process.NodeHandler { + if mock.GetNodeHandlerCalled != nil { + return mock.GetNodeHandlerCalled(shardID) + } + return nil +} + +// IsInterfaceNil - +func (mock *ChainSimulatorMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/chainSimulator/nodeHandlerMock.go b/testscommon/chainSimulator/nodeHandlerMock.go new file mode 100644 index 00000000000..3f306807130 --- /dev/null +++ b/testscommon/chainSimulator/nodeHandlerMock.go @@ -0,0 +1,151 @@ +package chainSimulator + +import ( + chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/sharding" +) + +// NodeHandlerMock - +type NodeHandlerMock struct { + GetProcessComponentsCalled func() factory.ProcessComponentsHolder + GetChainHandlerCalled func() chainData.ChainHandler + GetBroadcastMessengerCalled func() consensus.BroadcastMessenger + GetShardCoordinatorCalled func() sharding.Coordinator + GetCryptoComponentsCalled func() factory.CryptoComponentsHolder + GetCoreComponentsCalled func() factory.CoreComponentsHolder + GetDataComponentsCalled func() factory.DataComponentsHandler + GetStateComponentsCalled func() factory.StateComponentsHolder + GetFacadeHandlerCalled func() shared.FacadeHandler + GetStatusCoreComponentsCalled func() factory.StatusCoreComponentsHolder + SetKeyValueForAddressCalled func(addressBytes []byte, state map[string]string) error + SetStateForAddressCalled func(address []byte, state *dtos.AddressState) error + RemoveAccountCalled func(address []byte) error + CloseCalled func() error +} + +// ForceChangeOfEpoch - +func (mock *NodeHandlerMock) ForceChangeOfEpoch() error { + return nil +} + +// GetProcessComponents - +func (mock *NodeHandlerMock) GetProcessComponents() factory.ProcessComponentsHolder { + if mock.GetProcessComponentsCalled != nil { + return mock.GetProcessComponentsCalled() + } + return nil +} + +// GetChainHandler - +func (mock *NodeHandlerMock) GetChainHandler() chainData.ChainHandler { + if mock.GetChainHandlerCalled != nil { + return mock.GetChainHandlerCalled() + } + return nil +} + +// GetBroadcastMessenger - +func (mock *NodeHandlerMock) GetBroadcastMessenger() consensus.BroadcastMessenger { + if mock.GetBroadcastMessengerCalled != nil { + return mock.GetBroadcastMessengerCalled() + } + return nil +} + +// GetShardCoordinator - +func (mock *NodeHandlerMock) GetShardCoordinator() sharding.Coordinator { + if 
mock.GetShardCoordinatorCalled != nil { + return mock.GetShardCoordinatorCalled() + } + return nil +} + +// GetCryptoComponents - +func (mock *NodeHandlerMock) GetCryptoComponents() factory.CryptoComponentsHolder { + if mock.GetCryptoComponentsCalled != nil { + return mock.GetCryptoComponentsCalled() + } + return nil +} + +// GetCoreComponents - +func (mock *NodeHandlerMock) GetCoreComponents() factory.CoreComponentsHolder { + if mock.GetCoreComponentsCalled != nil { + return mock.GetCoreComponentsCalled() + } + return nil +} + +// GetDataComponents - +func (mock *NodeHandlerMock) GetDataComponents() factory.DataComponentsHolder { + if mock.GetDataComponentsCalled != nil { + return mock.GetDataComponentsCalled() + } + return nil +} + +// GetStateComponents - +func (mock *NodeHandlerMock) GetStateComponents() factory.StateComponentsHolder { + if mock.GetStateComponentsCalled != nil { + return mock.GetStateComponentsCalled() + } + return nil +} + +// GetFacadeHandler - +func (mock *NodeHandlerMock) GetFacadeHandler() shared.FacadeHandler { + if mock.GetFacadeHandlerCalled != nil { + return mock.GetFacadeHandlerCalled() + } + return nil +} + +// GetStatusCoreComponents - +func (mock *NodeHandlerMock) GetStatusCoreComponents() factory.StatusCoreComponentsHolder { + if mock.GetStatusCoreComponentsCalled != nil { + return mock.GetStatusCoreComponentsCalled() + } + return nil +} + +// SetKeyValueForAddress - +func (mock *NodeHandlerMock) SetKeyValueForAddress(addressBytes []byte, state map[string]string) error { + if mock.SetKeyValueForAddressCalled != nil { + return mock.SetKeyValueForAddressCalled(addressBytes, state) + } + return nil +} + +// SetStateForAddress - +func (mock *NodeHandlerMock) SetStateForAddress(address []byte, state *dtos.AddressState) error { + if mock.SetStateForAddressCalled != nil { + return mock.SetStateForAddressCalled(address, state) + } + return nil +} + +// RemoveAccount - +func (mock *NodeHandlerMock) RemoveAccount(address []byte) error { + if mock.RemoveAccountCalled != nil { + return mock.RemoveAccountCalled(address) + } + + return nil +} + +// Close - +func (mock *NodeHandlerMock) Close() error { + if mock.CloseCalled != nil { + return mock.CloseCalled() + } + return nil +} + +// IsInterfaceNil - +func (mock *NodeHandlerMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/components/components.go b/testscommon/components/components.go index 9bd7f609623..3f57793ec48 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -134,7 +134,7 @@ func GetConsensusArgs(shardCoordinator sharding.Coordinator) consensusComp.Conse coreComponents := GetCoreComponents() cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) - stateComponents := GetStateComponents(coreComponents) + stateComponents := GetStateComponents(coreComponents, GetStatusCoreComponents()) dataComponents := GetDataComponents(coreComponents, shardCoordinator) processComponents := GetProcessComponents( shardCoordinator, @@ -199,6 +199,13 @@ func GetCryptoArgs(coreComponents factory.CoreComponentsHolder) cryptoComp.Crypt }, EnableEpochs: config.EnableEpochs{ BLSMultiSignerEnableEpoch: []config.MultiSignerConfig{{EnableEpoch: 0, Type: "no-KOSK"}}, + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 100, + NodesToShufflePerShard: 2, + }, + }, }, } @@ -257,7 +264,7 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { Enabled: 
false, Type: "optimized", RefreshIntervalInSec: 10, - ProtocolID: "erd/kad/1.0.0", + ProtocolIDs: []string{"erd/kad/1.0.0"}, InitialPeerList: []string{"peer0", "peer1"}, BucketSize: 10, RoutingTableRefreshIntervalInSec: 5, @@ -325,7 +332,7 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { } // GetStateFactoryArgs - -func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder) stateComp.StateComponentsFactoryArgs { +func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder, statusCoreComp factory.StatusCoreComponentsHolder) stateComp.StateComponentsFactoryArgs { tsm, _ := trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) storageManagerUser, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) tsm, _ = trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) @@ -344,7 +351,7 @@ func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder) stateComp. stateComponentsFactoryArgs := stateComp.StateComponentsFactoryArgs{ Config: GetGeneralConfig(), Core: coreComponents, - StatusCore: GetStatusCoreComponents(), + StatusCore: statusCoreComp, StorageService: disabled.NewChainStorer(), ProcessingMode: common.Normal, ChainHandler: &testscommon.ChainHandlerStub{}, @@ -359,7 +366,7 @@ func GetProcessComponentsFactoryArgs(shardCoordinator sharding.Coordinator) proc cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) dataComponents := GetDataComponents(coreComponents, shardCoordinator) - stateComponents := GetStateComponents(coreComponents) + stateComponents := GetStateComponents(coreComponents, GetStatusCoreComponents()) processArgs := GetProcessArgs( shardCoordinator, coreComponents, @@ -549,6 +556,8 @@ func GetProcessArgs( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100, + NodeLimitPercentage: 100, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -559,12 +568,30 @@ func GetProcessArgs( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, HistoryRepo: &dblookupext.HistoryRepositoryStub{}, FlagsConfig: config.ContextFlagsConfig{ Version: "v1.0.0", }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, + EpochConfig: config.EpochConfig{ + EnableEpochs: config.EnableEpochs{ + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 100, + NodesToShufflePerShard: 2, + }, + }, + }, + }, } } @@ -627,7 +654,7 @@ func GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator shardin cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) dataComponents := GetDataComponents(coreComponents, shardCoordinator) - stateComponents := GetStateComponents(coreComponents) + stateComponents := GetStateComponents(coreComponents, GetStatusCoreComponents()) processComponents := GetProcessComponents( shardCoordinator, coreComponents, @@ -719,22 +746,22 @@ func GetCryptoComponents(coreComponents factory.CoreComponentsHolder) factory.Cr } // GetStateComponents - -func GetStateComponents(coreComponents factory.CoreComponentsHolder) factory.StateComponentsHolder { - stateArgs := GetStateFactoryArgs(coreComponents) +func GetStateComponents(coreComponents 
factory.CoreComponentsHolder, statusCoreComponents factory.StatusCoreComponentsHolder) factory.StateComponentsHolder { + stateArgs := GetStateFactoryArgs(coreComponents, statusCoreComponents) stateComponentsFactory, err := stateComp.NewStateComponentsFactory(stateArgs) if err != nil { - log.Error("getStateComponents NewStateComponentsFactory", "error", err.Error()) + log.Error("GetStateComponents NewStateComponentsFactory", "error", err.Error()) return nil } stateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) if err != nil { - log.Error("getStateComponents NewManagedStateComponents", "error", err.Error()) + log.Error("GetStateComponents NewManagedStateComponents", "error", err.Error()) return nil } err = stateComponents.Create() if err != nil { - log.Error("getStateComponents Create", "error", err.Error()) + log.Error("GetStateComponents Create", "error", err.Error()) return nil } return stateComponents @@ -757,7 +784,7 @@ func GetStatusCoreComponents() factory.StatusCoreComponentsHolder { err = statusCoreComponents.Create() if err != nil { - log.Error("statusCoreComponents Create", "error", err.Error()) + log.Error("GetStatusCoreComponents Create", "error", err.Error()) return nil } diff --git a/testscommon/components/configs.go b/testscommon/components/configs.go index 96af9f41987..f86a5ae59cc 100644 --- a/testscommon/components/configs.go +++ b/testscommon/components/configs.go @@ -75,12 +75,22 @@ func GetGeneralConfig() config.Config { WasmVMVersions: []config.WasmVMVersionByEpoch{ {StartEpoch: 0, Version: "v0.3"}, }, + TransferAndExecuteByUserAddresses: []string{ + "erd1he8wwxn4az3j82p7wwqsdk794dm7hcrwny6f8dfegkfla34udx7qrf7xje", //shard 0 + "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", //shard 1 + "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 + }, }, }, Execution: config.VirtualMachineConfig{ WasmVMVersions: []config.WasmVMVersionByEpoch{ {StartEpoch: 0, Version: "v0.3"}, }, + TransferAndExecuteByUserAddresses: []string{ + "erd1he8wwxn4az3j82p7wwqsdk794dm7hcrwny6f8dfegkfla34udx7qrf7xje", //shard 0 + "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", //shard 1 + "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 + }, }, GasConfig: config.VirtualMachineGasConfig{ ShardMaxGasPerVmQuery: 1_500_000_000, diff --git a/testscommon/components/default.go b/testscommon/components/default.go index c39baf24385..8e1942037dd 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -13,12 +13,16 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverTests "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" 
"github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/storageManager" @@ -42,17 +46,18 @@ func GetDefaultCoreComponents() *mock.CoreComponentsMock { MinTransactionVersionCalled: func() uint32 { return 1 }, - WatchdogTimer: &testscommon.WatchdogMock{}, - AlarmSch: &testscommon.AlarmSchedulerStub{}, - NtpSyncTimer: &testscommon.SyncTimerStub{}, - RoundHandlerField: &testscommon.RoundHandlerMock{}, - EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - RatingsConfig: &testscommon.RatingsInfoMock{}, - RatingHandler: &testscommon.RaterMock{}, - NodesConfig: &testscommon.NodesSetupStub{}, - StartTime: time.Time{}, - NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EpochChangeNotifier: &epochNotifierMock.EpochNotifierStub{}, + WatchdogTimer: &testscommon.WatchdogMock{}, + AlarmSch: &testscommon.AlarmSchedulerStub{}, + NtpSyncTimer: &testscommon.SyncTimerStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, + RatingHandler: &testscommon.RaterMock{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, + StartTime: time.Time{}, + NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EpochChangeNotifier: &epochNotifierMock.EpochNotifierStub{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } } @@ -131,8 +136,8 @@ func GetDefaultProcessComponents(shardCoordinator sharding.Coordinator) *mock.Pr BootSore: &mock.BootstrapStorerMock{}, HeaderSigVerif: &mock.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorStub{}, - ValidatorProvider: &mock.ValidatorsProviderStub{}, + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{}, + ValidatorProvider: &stakingcommon.ValidatorsProviderStub{}, BlockTrack: &mock.BlockTrackerStub{}, PendingMiniBlocksHdl: &mock.PendingMiniBlocksHandlerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, @@ -152,6 +157,7 @@ func GetDefaultProcessComponents(shardCoordinator sharding.Coordinator) *mock.Pr return &mock.PrivateKeyStub{} }, }, - HardforkTriggerField: &testscommon.HardforkTriggerStub{}, + HardforkTriggerField: &testscommon.HardforkTriggerStub{}, + RelayedTxV3ProcessorField: &processMocks.RelayedTxV3ProcessorMock{}, } } diff --git a/testscommon/dataRetriever/poolFactory.go b/testscommon/dataRetriever/poolFactory.go index 77bdeb610a7..a8f4374e800 100644 --- a/testscommon/dataRetriever/poolFactory.go +++ b/testscommon/dataRetriever/poolFactory.go @@ -98,11 +98,10 @@ func CreatePoolsHolder(numShards uint32, selfShard uint32) dataRetriever.PoolsHo MaxOpenFiles: 10, } - dbConfigHandler := storageFactory.NewDBConfigHandler(dbConfig) - persisterFactory, err := storageFactory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := storageFactory.NewPersisterFactory(dbConfig) panicIfError("Create persister factory", err) - persister, err := storageunit.NewDB(persisterFactory, tempDir) + persister, err := persisterFactory.CreateWithRetries(tempDir) panicIfError("Create trieSync DB", err) tnf := factory.NewTrieNodeFactory() diff --git 
a/testscommon/dataRetriever/poolsHolderMock.go b/testscommon/dataRetriever/poolsHolderMock.go index 5c711addbb0..d3d30562954 100644 --- a/testscommon/dataRetriever/poolsHolderMock.go +++ b/testscommon/dataRetriever/poolsHolderMock.go @@ -4,6 +4,7 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" @@ -142,6 +143,11 @@ func (holder *PoolsHolderMock) Headers() dataRetriever.HeadersPool { return holder.headers } +// SetHeadersPool - +func (holder *PoolsHolderMock) SetHeadersPool(headersPool dataRetriever.HeadersPool) { + holder.headers = headersPool +} + // MiniBlocks - func (holder *PoolsHolderMock) MiniBlocks() storage.Cacher { return holder.miniBlocks diff --git a/testscommon/economicsmocks/economicsDataHandlerStub.go b/testscommon/economicsmocks/economicsDataHandlerStub.go index b6cf36f4491..bb59020bc27 100644 --- a/testscommon/economicsmocks/economicsDataHandlerStub.go +++ b/testscommon/economicsmocks/economicsDataHandlerStub.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/process" ) // EconomicsHandlerStub - @@ -46,6 +47,8 @@ type EconomicsHandlerStub struct { ComputeGasLimitInEpochCalled func(tx data.TransactionWithFeeHandler, epoch uint32) uint64 ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled func(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsedInEpochCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int + ComputeRelayedTxFeesCalled func(tx data.TransactionWithFeeHandler) (*big.Int, *big.Int, error) + SetTxTypeHandlerCalled func(txTypeHandler process.TxTypeHandler) error } // ComputeFeeForProcessing - @@ -356,6 +359,22 @@ func (e *EconomicsHandlerStub) ComputeTxFeeBasedOnGasUsedInEpoch(tx data.Transac return nil } +// ComputeRelayedTxFees - +func (e *EconomicsHandlerStub) ComputeRelayedTxFees(tx data.TransactionWithFeeHandler) (*big.Int, *big.Int, error) { + if e.ComputeRelayedTxFeesCalled != nil { + return e.ComputeRelayedTxFeesCalled(tx) + } + return big.NewInt(0), big.NewInt(0), nil +} + +// SetTxTypeHandler - +func (e *EconomicsHandlerStub) SetTxTypeHandler(txTypeHandler process.TxTypeHandler) error { + if e.SetTxTypeHandlerCalled != nil { + return e.SetTxTypeHandlerCalled(txTypeHandler) + } + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (e *EconomicsHandlerStub) IsInterfaceNil() bool { return e == nil diff --git a/testscommon/economicsmocks/economicsHandlerMock.go b/testscommon/economicsmocks/economicsHandlerMock.go index 88a54c90e72..3506d2ba9a7 100644 --- a/testscommon/economicsmocks/economicsHandlerMock.go +++ b/testscommon/economicsmocks/economicsHandlerMock.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/process" ) // EconomicsHandlerMock - @@ -46,6 +47,8 @@ type EconomicsHandlerMock struct { ComputeGasLimitInEpochCalled func(tx data.TransactionWithFeeHandler, epoch uint32) uint64 ComputeGasUsedAndFeeBasedOnRefundValueInEpochCalled func(tx data.TransactionWithFeeHandler, 
refundValue *big.Int, epoch uint32) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsedInEpochCalled func(tx data.TransactionWithFeeHandler, gasUsed uint64, epoch uint32) *big.Int + ComputeRelayedTxFeesCalled func(tx data.TransactionWithFeeHandler) (*big.Int, *big.Int, error) + SetTxTypeHandlerCalled func(txTypeHandler process.TxTypeHandler) error } // LeaderPercentage - @@ -335,6 +338,22 @@ func (ehm *EconomicsHandlerMock) ComputeTxFeeBasedOnGasUsedInEpoch(tx data.Trans return nil } +// ComputeRelayedTxFees - +func (ehm *EconomicsHandlerMock) ComputeRelayedTxFees(tx data.TransactionWithFeeHandler) (*big.Int, *big.Int, error) { + if ehm.ComputeRelayedTxFeesCalled != nil { + return ehm.ComputeRelayedTxFeesCalled(tx) + } + return big.NewInt(0), big.NewInt(0), nil +} + +// SetTxTypeHandler - +func (ehm *EconomicsHandlerMock) SetTxTypeHandler(txTypeHandler process.TxTypeHandler) error { + if ehm.SetTxTypeHandlerCalled != nil { + return ehm.SetTxTypeHandlerCalled(txTypeHandler) + } + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (ehm *EconomicsHandlerMock) IsInterfaceNil() bool { return ehm == nil diff --git a/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go b/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go index 16fc9019390..bf633508147 100644 --- a/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go +++ b/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go @@ -44,6 +44,10 @@ func (stub *EnableEpochsHandlerStub) AddActiveFlags(flags ...core.EnableEpochFla stub.Lock() defer stub.Unlock() + if len(stub.activeFlags) == 0 { + stub.activeFlags = make(map[core.EnableEpochFlag]struct{}) + } + for _, flag := range flags { stub.activeFlags[flag] = struct{}{} } diff --git a/integrationTests/mock/epochStartSystemSCStub.go b/testscommon/epochStartSystemSCStub.go similarity index 72% rename from integrationTests/mock/epochStartSystemSCStub.go rename to testscommon/epochStartSystemSCStub.go index fd2c92553cf..ff4e4addbf4 100644 --- a/integrationTests/mock/epochStartSystemSCStub.go +++ b/testscommon/epochStartSystemSCStub.go @@ -1,6 +1,7 @@ -package mock +package testscommon import ( + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/state" @@ -8,7 +9,7 @@ import ( // EpochStartSystemSCStub - type EpochStartSystemSCStub struct { - ProcessSystemSmartContractCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error + ProcessSystemSmartContractCalled func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error ProcessDelegationRewardsCalled func(miniBlocks block.MiniBlockSlice, txCache epochStart.TransactionCacher) error ToggleUnStakeUnBondCalled func(value bool) error } @@ -22,9 +23,12 @@ func (e *EpochStartSystemSCStub) ToggleUnStakeUnBond(value bool) error { } // ProcessSystemSmartContract - -func (e *EpochStartSystemSCStub) ProcessSystemSmartContract(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { +func (e *EpochStartSystemSCStub) ProcessSystemSmartContract( + validatorsInfo state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, +) error { if e.ProcessSystemSmartContractCalled != nil { - return e.ProcessSystemSmartContractCalled(validatorInfos, nonce, epoch) + return 
e.ProcessSystemSmartContractCalled(validatorsInfo, header) } return nil } diff --git a/process/mock/epochValidatorInfoCreatorStub.go b/testscommon/epochValidatorInfoCreatorStub.go similarity index 88% rename from process/mock/epochValidatorInfoCreatorStub.go rename to testscommon/epochValidatorInfoCreatorStub.go index 445d305596e..31c07037f1e 100644 --- a/process/mock/epochValidatorInfoCreatorStub.go +++ b/testscommon/epochValidatorInfoCreatorStub.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( "github.com/multiversx/mx-chain-core-go/data" @@ -9,8 +9,8 @@ import ( // EpochValidatorInfoCreatorStub - type EpochValidatorInfoCreatorStub struct { - CreateValidatorInfoMiniBlocksCalled func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) - VerifyValidatorInfoMiniBlocksCalled func(miniblocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error + CreateValidatorInfoMiniBlocksCalled func(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) + VerifyValidatorInfoMiniBlocksCalled func(miniblocks []*block.MiniBlock, validatorsInfo state.ShardValidatorsInfoMapHandler) error GetLocalValidatorInfoCacheCalled func() epochStart.ValidatorInfoCacher CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte GetValidatorInfoTxsCalled func(body *block.Body) map[string]*state.ShardValidatorInfo @@ -20,7 +20,7 @@ type EpochValidatorInfoCreatorStub struct { } // CreateValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { +func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { if e.CreateValidatorInfoMiniBlocksCalled != nil { return e.CreateValidatorInfoMiniBlocksCalled(validatorInfo) } @@ -28,7 +28,7 @@ func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorI } // VerifyValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error { +func (e *EpochValidatorInfoCreatorStub) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo state.ShardValidatorsInfoMapHandler) error { if e.VerifyValidatorInfoMiniBlocksCalled != nil { return e.VerifyValidatorInfoMiniBlocksCalled(miniBlocks, validatorsInfo) } diff --git a/testscommon/esdtStorageHandlerStub.go b/testscommon/esdtStorageHandlerStub.go index 781e2b33fcc..47825717409 100644 --- a/testscommon/esdtStorageHandlerStub.go +++ b/testscommon/esdtStorageHandlerStub.go @@ -15,8 +15,28 @@ type EsdtStorageHandlerStub struct { GetESDTNFTTokenOnDestinationCalled func(acnt vmcommon.UserAccountHandler, esdtTokenKey []byte, nonce uint64) (*esdt.ESDigitalToken, bool, error) GetESDTNFTTokenOnDestinationWithCustomSystemAccountCalled func(accnt vmcommon.UserAccountHandler, esdtTokenKey []byte, nonce uint64, systemAccount vmcommon.UserAccountHandler) (*esdt.ESDigitalToken, bool, error) WasAlreadySentToDestinationShardAndUpdateStateCalled func(tickerID []byte, nonce uint64, dstAddress []byte) (bool, error) - SaveNFTMetaDataToSystemAccountCalled func(tx data.TransactionHandler) error - AddToLiquiditySystemAccCalled func(esdtTokenKey []byte, nonce uint64, transferValue *big.Int) error + SaveNFTMetaDataCalled func(tx data.TransactionHandler) error + AddToLiquiditySystemAccCalled 
func(esdtTokenKey []byte, tokenType uint32, nonce uint64, transferValue *big.Int, keepMetadataOnZeroLiquidity bool) error + SaveMetaDataToSystemAccountCalled func(tokenKey []byte, nonce uint64, esdtData *esdt.ESDigitalToken) error + GetMetaDataFromSystemAccountCalled func(bytes []byte, u uint64) (*esdt.MetaData, error) +} + +// SaveMetaDataToSystemAccount - +func (e *EsdtStorageHandlerStub) SaveMetaDataToSystemAccount(tokenKey []byte, nonce uint64, esdtData *esdt.ESDigitalToken) error { + if e.SaveMetaDataToSystemAccountCalled != nil { + return e.SaveMetaDataToSystemAccountCalled(tokenKey, nonce, esdtData) + } + + return nil +} + +// GetMetaDataFromSystemAccount - +func (e *EsdtStorageHandlerStub) GetMetaDataFromSystemAccount(bytes []byte, u uint64) (*esdt.MetaData, error) { + if e.GetMetaDataFromSystemAccountCalled != nil { + return e.GetMetaDataFromSystemAccountCalled(bytes, u) + } + + return nil, nil } // SaveESDTNFTToken - @@ -64,19 +84,19 @@ func (e *EsdtStorageHandlerStub) WasAlreadySentToDestinationShardAndUpdateState( return false, nil } -// SaveNFTMetaDataToSystemAccount - -func (e *EsdtStorageHandlerStub) SaveNFTMetaDataToSystemAccount(tx data.TransactionHandler) error { - if e.SaveNFTMetaDataToSystemAccountCalled != nil { - return e.SaveNFTMetaDataToSystemAccountCalled(tx) +// SaveNFTMetaData - +func (e *EsdtStorageHandlerStub) SaveNFTMetaData(tx data.TransactionHandler) error { + if e.SaveNFTMetaDataCalled != nil { + return e.SaveNFTMetaDataCalled(tx) } return nil } // AddToLiquiditySystemAcc - -func (e *EsdtStorageHandlerStub) AddToLiquiditySystemAcc(esdtTokenKey []byte, nonce uint64, transferValue *big.Int) error { +func (e *EsdtStorageHandlerStub) AddToLiquiditySystemAcc(esdtTokenKey []byte, tokenType uint32, nonce uint64, transferValue *big.Int, keepMetadataOnZeroLiquidity bool) error { if e.AddToLiquiditySystemAccCalled != nil { - return e.AddToLiquiditySystemAccCalled(esdtTokenKey, nonce, transferValue) + return e.AddToLiquiditySystemAccCalled(esdtTokenKey, tokenType, nonce, transferValue, keepMetadataOnZeroLiquidity) } return nil diff --git a/testscommon/generalConfig.go b/testscommon/generalConfig.go index 0cf69ff24ed..53299443ebe 100644 --- a/testscommon/generalConfig.go +++ b/testscommon/generalConfig.go @@ -363,7 +363,8 @@ func GetGeneralConfig() config.Config { CheckNodesOnDisk: false, }, Antiflood: config.AntifloodConfig{ - NumConcurrentResolverJobs: 2, + NumConcurrentResolverJobs: 2, + NumConcurrentResolvingTrieNodesJobs: 1, TxAccumulator: config.TxAccumulatorConfig{ MaxAllowedTimeInMilliseconds: 10, MaxDeviationTimeInMilliseconds: 1, @@ -379,6 +380,11 @@ func GetGeneralConfig() config.Config { WasmVMVersions: []config.WasmVMVersionByEpoch{ {StartEpoch: 0, Version: "*"}, }, + TransferAndExecuteByUserAddresses: []string{ + "erd1he8wwxn4az3j82p7wwqsdk794dm7hcrwny6f8dfegkfla34udx7qrf7xje", //shard 0 + "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", //shard 1 + "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 + }, }, Querying: config.QueryVirtualMachineConfig{ NumConcurrentVMs: 1, @@ -386,6 +392,11 @@ func GetGeneralConfig() config.Config { WasmVMVersions: []config.WasmVMVersionByEpoch{ {StartEpoch: 0, Version: "*"}, }, + TransferAndExecuteByUserAddresses: []string{ + "erd1he8wwxn4az3j82p7wwqsdk794dm7hcrwny6f8dfegkfla34udx7qrf7xje", //shard 0 + "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", //shard 1 + "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 + }, }, }, }, @@ -415,6 
+426,12 @@ func GetGeneralConfig() config.Config { "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 }, }, + ResourceStats: config.ResourceStatsConfig{ + RefreshIntervalInSec: 1, + }, + RelayedTransactionConfig: config.RelayedTransactionConfig{ + MaxTransactionsAllowed: 10, + }, } } diff --git a/integrationTests/mock/nodesSetupStub.go b/testscommon/genesisMocks/nodesSetupStub.go similarity index 95% rename from integrationTests/mock/nodesSetupStub.go rename to testscommon/genesisMocks/nodesSetupStub.go index affb71e3530..ebe1cfe778a 100644 --- a/integrationTests/mock/nodesSetupStub.go +++ b/testscommon/genesisMocks/nodesSetupStub.go @@ -1,82 +1,82 @@ -package mock +package genesisMocks -import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +import ( + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) // NodesSetupStub - type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetChainIdCalled func() string - GetMinTransactionVersionCalled func() uint32 + InitialNodesPubKeysCalled func() map[uint32][]string + InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) + GetShardIDForPubKeyCalled func(pubKey []byte) (uint32, error) + NumberOfShardsCalled func() uint32 GetShardConsensusGroupSizeCalled func() uint32 GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 + GetRoundDurationCalled func() uint64 MinNumberOfMetaNodesCalled func() uint32 + MinNumberOfShardNodesCalled func() uint32 GetHysteresisCalled func() float32 GetAdaptivityCalled func() bool + InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) + InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + GetStartTimeCalled func() int64 + MinNumberOfNodesCalled func() uint32 AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - GetShardIDForPubKeyCalled func(pubkey []byte) (uint32, error) - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - InitialNodesPubKeysCalled func() map[uint32][]string MinNumberOfNodesWithHysteresisCalled func() uint32 MinShardHysteresisNodesCalled func() uint32 MinMetaHysteresisNodesCalled func() uint32 + GetChainIdCalled func() string + GetMinTransactionVersionCalled func() uint32 } -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() +// InitialNodesPubKeys - +func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { + if n.InitialNodesPubKeysCalled != nil { + return n.InitialNodesPubKeysCalled() } - return 1 + return map[uint32][]string{0: {"val1", "val2"}} } -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() +// InitialEligibleNodesPubKeysForShard - +func (n *NodesSetupStub) 
InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { + if n.InitialEligibleNodesPubKeysForShardCalled != nil { + return n.InitialEligibleNodesPubKeysForShardCalled(shardId) } - return 1 + return []string{"val1", "val2"}, nil } -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() +// NumberOfShards - +func (n *NodesSetupStub) NumberOfShards() uint32 { + if n.NumberOfShardsCalled != nil { + return n.NumberOfShardsCalled() } - - return 0 + return 1 } -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() +// GetShardIDForPubKey - +func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { + if n.GetShardIDForPubKeyCalled != nil { + return n.GetShardIDForPubKeyCalled(pubkey) } - - return false + return 0, nil } -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() +// GetShardConsensusGroupSize - +func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { + if n.GetShardConsensusGroupSizeCalled != nil { + return n.GetShardConsensusGroupSizeCalled() } - return 0 + return 1 } -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() +// GetMetaConsensusGroupSize - +func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { + if n.GetMetaConsensusGroupSizeCalled != nil { + return n.GetMetaConsensusGroupSizeCalled() } - return 0 + return 1 } // GetRoundDuration - @@ -84,54 +84,49 @@ func (n *NodesSetupStub) GetRoundDuration() uint64 { if n.GetRoundDurationCalled != nil { return n.GetRoundDurationCalled() } - return 0 + return 4000 } -// GetChainId - -func (n *NodesSetupStub) GetChainId() string { - if n.GetChainIdCalled != nil { - return n.GetChainIdCalled() - } - return "chainID" -} - -// GetMinTransactionVersion - -func (n *NodesSetupStub) GetMinTransactionVersion() uint32 { - if n.GetMinTransactionVersionCalled != nil { - return n.GetMinTransactionVersionCalled() +// MinNumberOfMetaNodes - +func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { + if n.MinNumberOfMetaNodesCalled != nil { + return n.MinNumberOfMetaNodesCalled() } return 1 } -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() +// MinNumberOfShardNodes - +func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { + if n.MinNumberOfShardNodesCalled != nil { + return n.MinNumberOfShardNodesCalled() } - return 0 + return 1 } -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() +// GetHysteresis - +func (n *NodesSetupStub) GetHysteresis() float32 { + if n.GetHysteresisCalled != nil { + return n.GetHysteresisCalled() } return 0 } -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() +// GetAdaptivity - +func (n *NodesSetupStub) GetAdaptivity() bool { + if n.GetAdaptivityCalled != nil { + return n.GetAdaptivityCalled() } - return 0 + return false } // InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, 
[]nodesCoordinator.GenesisNodeInfoHandler, error) { +func (n *NodesSetupStub) InitialNodesInfoForShard( + shardId uint32, +) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { if n.InitialNodesInfoForShardCalled != nil { return n.InitialNodesInfoForShardCalled(shardId) } + return nil, nil, nil } @@ -140,49 +135,56 @@ func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.Genes if n.InitialNodesInfoCalled != nil { return n.InitialNodesInfoCalled() } + return nil, nil } -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() +// GetStartTime - +func (n *NodesSetupStub) GetStartTime() int64 { + if n.GetStartTimeCalled != nil { + return n.GetStartTimeCalled() } - return nil + return 0 } -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) +// MinNumberOfNodes - +func (n *NodesSetupStub) MinNumberOfNodes() uint32 { + if n.MinNumberOfNodesCalled != nil { + return n.MinNumberOfNodesCalled() } - return 0, nil + return 1 } -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) +// MinNumberOfNodesWithHysteresis - +func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { + if n.MinNumberOfNodesWithHysteresisCalled != nil { + return n.MinNumberOfNodesWithHysteresisCalled() } - - return []string{"val1", "val2"}, nil + return n.MinNumberOfNodes() } -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() +// AllInitialNodes - +func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { + if n.AllInitialNodesCalled != nil { + return n.AllInitialNodesCalled() } + return nil +} - return map[uint32][]string{0: {"val1", "val2"}} +// GetChainId - +func (n *NodesSetupStub) GetChainId() string { + if n.GetChainIdCalled != nil { + return n.GetChainIdCalled() + } + return "chainID" } -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() +// GetMinTransactionVersion - +func (n *NodesSetupStub) GetMinTransactionVersion() uint32 { + if n.GetMinTransactionVersionCalled != nil { + return n.GetMinTransactionVersionCalled() } - return n.MinNumberOfNodes() + return 1 } // MinShardHysteresisNodes - diff --git a/testscommon/headerHandlerStub.go b/testscommon/headerHandlerStub.go index 7bbd8d2883e..ab1d354ec60 100644 --- a/testscommon/headerHandlerStub.go +++ b/testscommon/headerHandlerStub.go @@ -12,6 +12,7 @@ type HeaderHandlerStub struct { EpochField uint32 RoundField uint64 TimestampField uint64 + BlockBodyTypeInt32Field int32 GetMiniBlockHeadersWithDstCalled func(destId uint32) map[string]uint32 GetOrderedCrossMiniblocksWithDstCalled func(destId uint32) []*data.MiniBlockInfo GetPubKeysBitmapCalled func() []byte @@ -28,6 +29,15 @@ type HeaderHandlerStub struct { HasScheduledMiniBlocksCalled func() bool GetNonceCalled func() uint64 CheckFieldsForNilCalled func() error + SetShardIDCalled 
func(shardID uint32) error + SetPrevHashCalled func(hash []byte) error + SetPrevRandSeedCalled func(seed []byte) error + SetPubKeysBitmapCalled func(bitmap []byte) error + SetChainIDCalled func(chainID []byte) error + SetTimeStampCalled func(timestamp uint64) error + SetRandSeedCalled func(seed []byte) error + SetSignatureCalled func(signature []byte) error + SetLeaderSignatureCalled func(signature []byte) error } // GetAccumulatedFees - @@ -56,7 +66,10 @@ func (hhs *HeaderHandlerStub) GetReceiptsHash() []byte { } // SetShardID - -func (hhs *HeaderHandlerStub) SetShardID(_ uint32) error { +func (hhs *HeaderHandlerStub) SetShardID(shardID uint32) error { + if hhs.SetShardIDCalled != nil { + return hhs.SetShardIDCalled(shardID) + } return nil } @@ -114,7 +127,10 @@ func (hhs *HeaderHandlerStub) GetPrevHash() []byte { // GetPrevRandSeed - func (hhs *HeaderHandlerStub) GetPrevRandSeed() []byte { - return hhs.GetPrevRandSeedCalled() + if hhs.GetPrevRandSeedCalled != nil { + return hhs.GetPrevRandSeedCalled() + } + return make([]byte, 0) } // GetRandSeed - @@ -124,7 +140,10 @@ func (hhs *HeaderHandlerStub) GetRandSeed() []byte { // GetPubKeysBitmap - func (hhs *HeaderHandlerStub) GetPubKeysBitmap() []byte { - return hhs.GetPubKeysBitmapCalled() + if hhs.GetPubKeysBitmapCalled != nil { + return hhs.GetPubKeysBitmapCalled() + } + return make([]byte, 0) } // GetSignature - @@ -172,8 +191,11 @@ func (hhs *HeaderHandlerStub) SetRound(_ uint64) error { } // SetTimeStamp - -func (hhs *HeaderHandlerStub) SetTimeStamp(_ uint64) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetTimeStamp(timestamp uint64) error { + if hhs.SetTimeStampCalled != nil { + return hhs.SetTimeStampCalled(timestamp) + } + return nil } // SetRootHash - @@ -182,38 +204,59 @@ func (hhs *HeaderHandlerStub) SetRootHash(_ []byte) error { } // SetPrevHash - -func (hhs *HeaderHandlerStub) SetPrevHash(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetPrevHash(hash []byte) error { + if hhs.SetPrevHashCalled != nil { + return hhs.SetPrevHashCalled(hash) + } + return nil } // SetPrevRandSeed - -func (hhs *HeaderHandlerStub) SetPrevRandSeed(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetPrevRandSeed(seed []byte) error { + if hhs.SetPrevRandSeedCalled != nil { + return hhs.SetPrevRandSeedCalled(seed) + } + return nil } // SetRandSeed - -func (hhs *HeaderHandlerStub) SetRandSeed(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetRandSeed(seed []byte) error { + if hhs.SetRandSeedCalled != nil { + return hhs.SetRandSeedCalled(seed) + } + return nil } // SetPubKeysBitmap - -func (hhs *HeaderHandlerStub) SetPubKeysBitmap(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetPubKeysBitmap(bitmap []byte) error { + if hhs.SetPubKeysBitmapCalled != nil { + return hhs.SetPubKeysBitmapCalled(bitmap) + } + return nil } // SetSignature - -func (hhs *HeaderHandlerStub) SetSignature(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetSignature(signature []byte) error { + if hhs.SetSignatureCalled != nil { + return hhs.SetSignatureCalled(signature) + } + return nil } // SetLeaderSignature - -func (hhs *HeaderHandlerStub) SetLeaderSignature(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetLeaderSignature(signature []byte) error { + if hhs.SetLeaderSignatureCalled != nil { + return hhs.SetLeaderSignatureCalled(signature) + } + return nil } // SetChainID - -func (hhs *HeaderHandlerStub) 
SetChainID(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetChainID(chainID []byte) error { + if hhs.SetChainIDCalled != nil { + return hhs.SetChainIDCalled(chainID) + } + return nil } // SetTxCount - @@ -248,7 +291,7 @@ func (hhs *HeaderHandlerStub) GetMetaBlockHashes() [][]byte { // GetBlockBodyTypeInt32 - func (hhs *HeaderHandlerStub) GetBlockBodyTypeInt32() int32 { - panic("implement me") + return hhs.BlockBodyTypeInt32Field } // GetValidatorStatsRootHash - @@ -377,3 +420,10 @@ func (hhs *HeaderHandlerStub) HasScheduledMiniBlocks() bool { } return false } + +// SetBlockBodyTypeInt32 - +func (hhs *HeaderHandlerStub) SetBlockBodyTypeInt32(blockBodyType int32) error { + hhs.BlockBodyTypeInt32Field = blockBodyType + + return nil +} diff --git a/testscommon/integrationtests/factory.go b/testscommon/integrationtests/factory.go index 4d2f9ad02d8..9acfa7c5e10 100644 --- a/testscommon/integrationtests/factory.go +++ b/testscommon/integrationtests/factory.go @@ -62,8 +62,7 @@ func CreateStorer(parentDir string) storage.Storer { MaxBatchSize: 45000, MaxOpenFiles: 10, } - dbConfigHandler := factory.NewDBConfigHandler(dbConfig) - persisterFactory, err := factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(dbConfig) if err != nil { return nil } diff --git a/testscommon/keysHandlerSingleSignerMock.go b/testscommon/keysHandlerSingleSignerMock.go index 9235a5a2abe..afc38cbfab5 100644 --- a/testscommon/keysHandlerSingleSignerMock.go +++ b/testscommon/keysHandlerSingleSignerMock.go @@ -67,6 +67,11 @@ func (mock *keysHandlerSingleSignerMock) IsOriginalPublicKeyOfTheNode(pkBytes [] func (mock *keysHandlerSingleSignerMock) ResetRoundsWithoutReceivedMessages(_ []byte, _ core.PeerID) { } +// GetRedundancyStepInReason - +func (mock *keysHandlerSingleSignerMock) GetRedundancyStepInReason() string { + return "" +} + // IsInterfaceNil - func (mock *keysHandlerSingleSignerMock) IsInterfaceNil() bool { return mock == nil diff --git a/testscommon/keysHandlerStub.go b/testscommon/keysHandlerStub.go index 8549de432f3..5821f305654 100644 --- a/testscommon/keysHandlerStub.go +++ b/testscommon/keysHandlerStub.go @@ -15,6 +15,7 @@ type KeysHandlerStub struct { GetAssociatedPidCalled func(pkBytes []byte) core.PeerID IsOriginalPublicKeyOfTheNodeCalled func(pkBytes []byte) bool ResetRoundsWithoutReceivedMessagesCalled func(pkBytes []byte, pid core.PeerID) + GetRedundancyStepInReasonCalled func() string } // GetHandledPrivateKey - @@ -76,6 +77,15 @@ func (stub *KeysHandlerStub) ResetRoundsWithoutReceivedMessages(pkBytes []byte, } } +// GetRedundancyStepInReason - +func (stub *KeysHandlerStub) GetRedundancyStepInReason() string { + if stub.GetRedundancyStepInReasonCalled != nil { + return stub.GetRedundancyStepInReasonCalled() + } + + return "" +} + // IsInterfaceNil - func (stub *KeysHandlerStub) IsInterfaceNil() bool { return stub == nil diff --git a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go index 8c9d56dca7b..62d7232eaf4 100644 --- a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go +++ b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go @@ -6,19 +6,21 @@ import ( "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) // BootstrapComponentsStub - type 
BootstrapComponentsStub struct { - Bootstrapper factory.EpochStartBootstrapper - BootstrapParams factory.BootstrapParamsHolder - NodeRole core.NodeType - ShCoordinator sharding.Coordinator - ShardCoordinatorCalled func() sharding.Coordinator - HdrVersionHandler nodeFactory.HeaderVersionHandler - VersionedHdrFactory nodeFactory.VersionedHeaderFactory - HdrIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler - GuardedAccountHandlerField process.GuardedAccountHandler + Bootstrapper factory.EpochStartBootstrapper + BootstrapParams factory.BootstrapParamsHolder + NodeRole core.NodeType + ShCoordinator sharding.Coordinator + ShardCoordinatorCalled func() sharding.Coordinator + HdrVersionHandler nodeFactory.HeaderVersionHandler + VersionedHdrFactory nodeFactory.VersionedHeaderFactory + HdrIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + GuardedAccountHandlerField process.GuardedAccountHandler + NodesCoordinatorRegistryFactoryField nodesCoordinator.NodesCoordinatorRegistryFactory } // Create - @@ -85,6 +87,11 @@ func (bcs *BootstrapComponentsStub) GuardedAccountHandler() process.GuardedAccou return bcs.GuardedAccountHandlerField } +// NodesCoordinatorRegistryFactory - +func (bcs *BootstrapComponentsStub) NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory { + return bcs.NodesCoordinatorRegistryFactoryField +} + // String - func (bcs *BootstrapComponentsStub) String() string { return "BootstrapComponentsStub" diff --git a/testscommon/mainFactoryMocks/dataComponentsStub.go b/testscommon/mainFactoryMocks/dataComponentsStub.go new file mode 100644 index 00000000000..3de2c0b33e6 --- /dev/null +++ b/testscommon/mainFactoryMocks/dataComponentsStub.go @@ -0,0 +1,69 @@ +package mainFactoryMocks + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/factory" +) + +// DataComponentsHolderStub - +type DataComponentsHolderStub struct { + BlockchainCalled func() data.ChainHandler + SetBlockchainCalled func(chain data.ChainHandler) + StorageServiceCalled func() dataRetriever.StorageService + DatapoolCalled func() dataRetriever.PoolsHolder + MiniBlocksProviderCalled func() factory.MiniBlockProvider + CloneCalled func() interface{} +} + +// Blockchain - +func (dchs *DataComponentsHolderStub) Blockchain() data.ChainHandler { + if dchs.BlockchainCalled != nil { + return dchs.BlockchainCalled() + } + return nil +} + +// SetBlockchain - +func (dchs *DataComponentsHolderStub) SetBlockchain(chain data.ChainHandler) { + if dchs.SetBlockchainCalled != nil { + dchs.SetBlockchainCalled(chain) + } +} + +// StorageService - +func (dchs *DataComponentsHolderStub) StorageService() dataRetriever.StorageService { + if dchs.StorageServiceCalled != nil { + return dchs.StorageServiceCalled() + } + return nil +} + +// Datapool - +func (dchs *DataComponentsHolderStub) Datapool() dataRetriever.PoolsHolder { + if dchs.DatapoolCalled != nil { + return dchs.DatapoolCalled() + } + return nil +} + +// MiniBlocksProvider - +func (dchs *DataComponentsHolderStub) MiniBlocksProvider() factory.MiniBlockProvider { + if dchs.MiniBlocksProviderCalled != nil { + return dchs.MiniBlocksProviderCalled() + } + return nil +} + +// Clone - +func (dchs *DataComponentsHolderStub) Clone() interface{} { + if dchs.CloneCalled != nil { + return dchs.CloneCalled() + } + return nil +} + +// IsInterfaceNil - +func (dchs *DataComponentsHolderStub) IsInterfaceNil() bool { 
+ return dchs == nil +} diff --git a/testscommon/managedPeersHolderStub.go b/testscommon/managedPeersHolderStub.go index 1cbd397debc..ef9a550fe2b 100644 --- a/testscommon/managedPeersHolderStub.go +++ b/testscommon/managedPeersHolderStub.go @@ -17,6 +17,7 @@ type ManagedPeersHolderStub struct { IncrementRoundsWithoutReceivedMessagesCalled func(pkBytes []byte) ResetRoundsWithoutReceivedMessagesCalled func(pkBytes []byte, pid core.PeerID) GetManagedKeysByCurrentNodeCalled func() map[string]crypto.PrivateKey + GetLoadedKeysByCurrentNodeCalled func() [][]byte IsKeyManagedByCurrentNodeCalled func(pkBytes []byte) bool IsKeyRegisteredCalled func(pkBytes []byte) bool IsPidManagedByCurrentNodeCalled func(pid core.PeerID) bool @@ -25,6 +26,7 @@ type ManagedPeersHolderStub struct { GetNextPeerAuthenticationTimeCalled func(pkBytes []byte) (time.Time, error) SetNextPeerAuthenticationTimeCalled func(pkBytes []byte, nextTime time.Time) IsMultiKeyModeCalled func() bool + GetRedundancyStepInReasonCalled func() string } // AddManagedPeer - @@ -89,6 +91,14 @@ func (stub *ManagedPeersHolderStub) GetManagedKeysByCurrentNode() map[string]cry return nil } +// GetLoadedKeysByCurrentNode - +func (stub *ManagedPeersHolderStub) GetLoadedKeysByCurrentNode() [][]byte { + if stub.GetLoadedKeysByCurrentNodeCalled != nil { + return stub.GetLoadedKeysByCurrentNodeCalled() + } + return make([][]byte, 0) +} + // IsKeyManagedByCurrentNode - func (stub *ManagedPeersHolderStub) IsKeyManagedByCurrentNode(pkBytes []byte) bool { if stub.IsKeyManagedByCurrentNodeCalled != nil { @@ -151,6 +161,15 @@ func (stub *ManagedPeersHolderStub) IsMultiKeyMode() bool { return false } +// GetRedundancyStepInReason - +func (stub *ManagedPeersHolderStub) GetRedundancyStepInReason() string { + if stub.GetRedundancyStepInReasonCalled != nil { + return stub.GetRedundancyStepInReasonCalled() + } + + return "" +} + // IsInterfaceNil - func (stub *ManagedPeersHolderStub) IsInterfaceNil() bool { return stub == nil diff --git a/testscommon/managedPeersMonitorStub.go b/testscommon/managedPeersMonitorStub.go index 2ae60ccc55e..43aea679c14 100644 --- a/testscommon/managedPeersMonitorStub.go +++ b/testscommon/managedPeersMonitorStub.go @@ -6,6 +6,7 @@ type ManagedPeersMonitorStub struct { GetEligibleManagedKeysCalled func() ([][]byte, error) GetWaitingManagedKeysCalled func() ([][]byte, error) GetManagedKeysCalled func() [][]byte + GetLoadedKeysCalled func() [][]byte } // GetManagedKeys - @@ -16,6 +17,14 @@ func (stub *ManagedPeersMonitorStub) GetManagedKeys() [][]byte { return make([][]byte, 0) } +// GetLoadedKeys - +func (stub *ManagedPeersMonitorStub) GetLoadedKeys() [][]byte { + if stub.GetLoadedKeysCalled != nil { + return stub.GetLoadedKeysCalled() + } + return make([][]byte, 0) +} + // GetManagedKeysCount - func (stub *ManagedPeersMonitorStub) GetManagedKeysCount() int { if stub.GetManagedKeysCountCalled != nil { diff --git a/testscommon/maxNodesChangeConfigProviderStub.go b/testscommon/maxNodesChangeConfigProviderStub.go new file mode 100644 index 00000000000..1d7195e84f7 --- /dev/null +++ b/testscommon/maxNodesChangeConfigProviderStub.go @@ -0,0 +1,40 @@ +package testscommon + +import "github.com/multiversx/mx-chain-go/config" + +// MaxNodesChangeConfigProviderStub - +type MaxNodesChangeConfigProviderStub struct { + GetAllNodesConfigCalled func() []config.MaxNodesChangeConfig + GetCurrentNodesConfigCalled func() config.MaxNodesChangeConfig + EpochConfirmedCalled func(epoch uint32, round uint64) +} + +// GetAllNodesConfig - +func 
(stub *MaxNodesChangeConfigProviderStub) GetAllNodesConfig() []config.MaxNodesChangeConfig { + if stub.GetAllNodesConfigCalled != nil { + return stub.GetAllNodesConfigCalled() + } + + return nil +} + +// GetCurrentNodesConfig - +func (stub *MaxNodesChangeConfigProviderStub) GetCurrentNodesConfig() config.MaxNodesChangeConfig { + if stub.GetCurrentNodesConfigCalled != nil { + return stub.GetCurrentNodesConfigCalled() + } + + return config.MaxNodesChangeConfig{} +} + +// EpochConfirmed - +func (stub *MaxNodesChangeConfigProviderStub) EpochConfirmed(epoch uint32, round uint64) { + if stub.EpochConfirmedCalled != nil { + stub.EpochConfirmedCalled(epoch, round) + } +} + +// IsInterfaceNil - +func (stub *MaxNodesChangeConfigProviderStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/nodesSetupMock.go b/testscommon/nodesSetupMock.go deleted file mode 100644 index 070e0ecb6a2..00000000000 --- a/testscommon/nodesSetupMock.go +++ /dev/null @@ -1,191 +0,0 @@ -package testscommon - -import ( - "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" -) - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesPubKeysCalled func() map[uint32][]string - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - GetShardIDForPubKeyCalled func(pubKey []byte) (uint32, error) - NumberOfShardsCalled func() uint32 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - GetRoundDurationCalled func() uint64 - MinNumberOfMetaNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - GetHysteresisCalled func() float32 - GetAdaptivityCalled func() bool - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - MinNumberOfNodesCalled func() uint32 - AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - MinNumberOfNodesWithHysteresisCalled func() uint32 - MinShardHysteresisNodesCalled func() uint32 - MinMetaHysteresisNodesCalled func() uint32 -} - -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() - } - - return map[uint32][]string{0: {"val1", "val2"}} -} - -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) - } - - return []string{"val1", "val2"}, nil -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 1 -} - -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) - } - return 0, nil -} - -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 1 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != 
nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 1 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 4000 -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - return 1 -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - return 1 -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - return 0 -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - return false -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard( - shardId uint32, -) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - - return nil, nil -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 1 -} - -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() - } - return n.MinNumberOfNodes() -} - -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() - } - return nil -} - -// MinShardHysteresisNodes - -func (n *NodesSetupStub) MinShardHysteresisNodes() uint32 { - if n.MinShardHysteresisNodesCalled != nil { - return n.MinShardHysteresisNodesCalled() - } - return 1 -} - -// MinMetaHysteresisNodes - -func (n *NodesSetupStub) MinMetaHysteresisNodes() uint32 { - if n.MinMetaHysteresisNodesCalled != nil { - return n.MinMetaHysteresisNodesCalled() - } - return 1 -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/testscommon/nodesSetupMock/nodesSetupMock.go b/testscommon/nodesSetupMock/nodesSetupMock.go new file mode 100644 index 00000000000..392cb038719 --- /dev/null +++ b/testscommon/nodesSetupMock/nodesSetupMock.go @@ -0,0 +1,47 @@ +package nodesSetupMock + +// NodesSetupMock - +type NodesSetupMock struct { + NumberOfShardsField uint32 + HysteresisField float32 + MinNumberOfMetaNodesField uint32 + MinNumberOfShardNodesField uint32 +} + +// NumberOfShards - +func (n *NodesSetupMock) NumberOfShards() uint32 { + return n.NumberOfShardsField +} + +// GetHysteresis - +func (n *NodesSetupMock) GetHysteresis() float32 { + return n.HysteresisField +} + +// MinNumberOfMetaNodes - +func (n *NodesSetupMock) 
MinNumberOfMetaNodes() uint32 { + return n.MinNumberOfMetaNodesField +} + +// MinNumberOfShardNodes - +func (n *NodesSetupMock) MinNumberOfShardNodes() uint32 { + return n.MinNumberOfShardNodesField +} + +// MinNumberOfNodes - +func (n *NodesSetupMock) MinNumberOfNodes() uint32 { + return n.NumberOfShardsField*n.MinNumberOfShardNodesField + n.MinNumberOfMetaNodesField +} + +// MinNumberOfNodesWithHysteresis - +func (n *NodesSetupMock) MinNumberOfNodesWithHysteresis() uint32 { + hystNodesMeta := getHysteresisNodes(n.MinNumberOfMetaNodesField, n.HysteresisField) + hystNodesShard := getHysteresisNodes(n.MinNumberOfShardNodesField, n.HysteresisField) + minNumberOfNodes := n.MinNumberOfNodes() + + return minNumberOfNodes + hystNodesMeta + n.NumberOfShardsField*hystNodesShard +} + +func getHysteresisNodes(minNumNodes uint32, hysteresis float32) uint32 { + return uint32(float32(minNumNodes) * hysteresis) +} diff --git a/testscommon/p2pmocks/messageProcessorStub.go b/testscommon/p2pmocks/messageProcessorStub.go new file mode 100644 index 00000000000..5802dcc6785 --- /dev/null +++ b/testscommon/p2pmocks/messageProcessorStub.go @@ -0,0 +1,25 @@ +package p2pmocks + +import ( + "github.com/multiversx/mx-chain-communication-go/p2p" + "github.com/multiversx/mx-chain-core-go/core" +) + +// MessageProcessorStub - +type MessageProcessorStub struct { + ProcessReceivedMessageCalled func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error +} + +// ProcessReceivedMessage - +func (stub *MessageProcessorStub) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { + if stub.ProcessReceivedMessageCalled != nil { + return stub.ProcessReceivedMessageCalled(message, fromConnectedPeer, source) + } + + return nil +} + +// IsInterfaceNil - +func (stub *MessageProcessorStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/p2pmocks/messengerStub.go b/testscommon/p2pmocks/messengerStub.go index 368b8bdadd5..c48c95b9868 100644 --- a/testscommon/p2pmocks/messengerStub.go +++ b/testscommon/p2pmocks/messengerStub.go @@ -40,12 +40,12 @@ type MessengerStub struct { WaitForConnectionsCalled func(maxWaitingTime time.Duration, minNumOfPeers uint32) SignCalled func(payload []byte) ([]byte, error) VerifyCalled func(payload []byte, pid core.PeerID, signature []byte) error - AddPeerTopicNotifierCalled func(notifier p2p.PeerTopicNotifier) error BroadcastUsingPrivateKeyCalled func(topic string, buff []byte, pid core.PeerID, skBytes []byte) BroadcastOnChannelUsingPrivateKeyCalled func(channel string, topic string, buff []byte, pid core.PeerID, skBytes []byte) SignUsingPrivateKeyCalled func(skBytes []byte, payload []byte) ([]byte, error) ProcessReceivedMessageCalled func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error SetDebuggerCalled func(debugger p2p.Debugger) error + HasCompatibleProtocolIDCalled func(address string) bool } // ID - @@ -321,15 +321,6 @@ func (ms *MessengerStub) Verify(payload []byte, pid core.PeerID, signature []byt return nil } -// AddPeerTopicNotifier - -func (ms *MessengerStub) AddPeerTopicNotifier(notifier p2p.PeerTopicNotifier) error { - if ms.AddPeerTopicNotifierCalled != nil { - return ms.AddPeerTopicNotifierCalled(notifier) - } - - return nil -} - // BroadcastUsingPrivateKey - func (ms *MessengerStub) BroadcastUsingPrivateKey(topic string, buff []byte, pid core.PeerID, skBytes []byte) { if ms.BroadcastUsingPrivateKeyCalled 
!= nil { @@ -369,6 +360,15 @@ func (ms *MessengerStub) SetDebugger(debugger p2p.Debugger) error { return nil } +// HasCompatibleProtocolID - +func (ms *MessengerStub) HasCompatibleProtocolID(address string) bool { + if ms.HasCompatibleProtocolIDCalled != nil { + return ms.HasCompatibleProtocolIDCalled(address) + } + + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (ms *MessengerStub) IsInterfaceNil() bool { return ms == nil diff --git a/testscommon/pool/headersPoolStub.go b/testscommon/pool/headersPoolStub.go new file mode 100644 index 00000000000..66c01d91c68 --- /dev/null +++ b/testscommon/pool/headersPoolStub.go @@ -0,0 +1,105 @@ +package pool + +import ( + "errors" + + "github.com/multiversx/mx-chain-core-go/data" +) + +// HeadersPoolStub - +type HeadersPoolStub struct { + AddCalled func(headerHash []byte, header data.HeaderHandler) + RemoveHeaderByHashCalled func(headerHash []byte) + RemoveHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) + GetHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) + GetHeaderByHashCalled func(hash []byte) (data.HeaderHandler, error) + ClearCalled func() + RegisterHandlerCalled func(handler func(header data.HeaderHandler, shardHeaderHash []byte)) + NoncesCalled func(shardId uint32) []uint64 + LenCalled func() int + MaxSizeCalled func() int + GetNumHeadersCalled func(shardId uint32) int +} + +// AddHeader - +func (hps *HeadersPoolStub) AddHeader(headerHash []byte, header data.HeaderHandler) { + if hps.AddCalled != nil { + hps.AddCalled(headerHash, header) + } +} + +// RemoveHeaderByHash - +func (hps *HeadersPoolStub) RemoveHeaderByHash(headerHash []byte) { + if hps.RemoveHeaderByHashCalled != nil { + hps.RemoveHeaderByHashCalled(headerHash) + } +} + +// RemoveHeaderByNonceAndShardId - +func (hps *HeadersPoolStub) RemoveHeaderByNonceAndShardId(hdrNonce uint64, shardId uint32) { + if hps.RemoveHeaderByNonceAndShardIdCalled != nil { + hps.RemoveHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } +} + +// GetHeadersByNonceAndShardId - +func (hps *HeadersPoolStub) GetHeadersByNonceAndShardId(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + if hps.GetHeaderByNonceAndShardIdCalled != nil { + return hps.GetHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } + return nil, nil, errors.New("err") +} + +// GetHeaderByHash - +func (hps *HeadersPoolStub) GetHeaderByHash(hash []byte) (data.HeaderHandler, error) { + if hps.GetHeaderByHashCalled != nil { + return hps.GetHeaderByHashCalled(hash) + } + return nil, nil +} + +// Clear - +func (hps *HeadersPoolStub) Clear() { + if hps.ClearCalled != nil { + hps.ClearCalled() + } +} + +// RegisterHandler - +func (hps *HeadersPoolStub) RegisterHandler(handler func(header data.HeaderHandler, shardHeaderHash []byte)) { + if hps.RegisterHandlerCalled != nil { + hps.RegisterHandlerCalled(handler) + } +} + +// Nonces - +func (hps *HeadersPoolStub) Nonces(shardId uint32) []uint64 { + if hps.NoncesCalled != nil { + return hps.NoncesCalled(shardId) + } + return nil +} + +// Len - +func (hps *HeadersPoolStub) Len() int { + return 0 +} + +// MaxSize - +func (hps *HeadersPoolStub) MaxSize() int { + return 100 +} + +// IsInterfaceNil - +func (hps *HeadersPoolStub) IsInterfaceNil() bool { + return hps == nil +} + +// GetNumHeaders - +func (hps *HeadersPoolStub) GetNumHeaders(shardId uint32) int { + if hps.GetNumHeadersCalled != nil { + return hps.GetNumHeadersCalled(shardId) + 
} + + return 0 +} diff --git a/testscommon/preProcessorExecutionInfoHandlerMock.go b/testscommon/preProcessorExecutionInfoHandlerMock.go index 116f58f7d88..0946db0f4ba 100644 --- a/testscommon/preProcessorExecutionInfoHandlerMock.go +++ b/testscommon/preProcessorExecutionInfoHandlerMock.go @@ -3,7 +3,7 @@ package testscommon // PreProcessorExecutionInfoHandlerMock - type PreProcessorExecutionInfoHandlerMock struct { GetNumOfCrossInterMbsAndTxsCalled func() (int, int) - InitProcessedTxsResultsCalled func(key []byte) + InitProcessedTxsResultsCalled func(key []byte, parentKey []byte) RevertProcessedTxsResultsCalled func(txHashes [][]byte, key []byte) } @@ -16,9 +16,9 @@ func (ppeihm *PreProcessorExecutionInfoHandlerMock) GetNumOfCrossInterMbsAndTxs( } // InitProcessedTxsResults - -func (ppeihm *PreProcessorExecutionInfoHandlerMock) InitProcessedTxsResults(key []byte) { +func (ppeihm *PreProcessorExecutionInfoHandlerMock) InitProcessedTxsResults(key []byte, parentKey []byte) { if ppeihm.InitProcessedTxsResultsCalled != nil { - ppeihm.InitProcessedTxsResultsCalled(key) + ppeihm.InitProcessedTxsResultsCalled(key, parentKey) } } diff --git a/testscommon/processMocks/failedTxLogsAccumulatorMock.go b/testscommon/processMocks/failedTxLogsAccumulatorMock.go new file mode 100644 index 00000000000..903e56cd79f --- /dev/null +++ b/testscommon/processMocks/failedTxLogsAccumulatorMock.go @@ -0,0 +1,41 @@ +package processMocks + +import ( + "github.com/multiversx/mx-chain-core-go/data" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +// FailedTxLogsAccumulatorMock - +type FailedTxLogsAccumulatorMock struct { + GetLogsCalled func(txHash []byte) (data.TransactionHandler, []*vmcommon.LogEntry, bool) + SaveLogsCalled func(txHash []byte, tx data.TransactionHandler, logs []*vmcommon.LogEntry) error + RemoveCalled func(txHash []byte) +} + +// GetLogs - +func (mock *FailedTxLogsAccumulatorMock) GetLogs(txHash []byte) (data.TransactionHandler, []*vmcommon.LogEntry, bool) { + if mock.GetLogsCalled != nil { + return mock.GetLogsCalled(txHash) + } + return nil, nil, false +} + +// SaveLogs - +func (mock *FailedTxLogsAccumulatorMock) SaveLogs(txHash []byte, tx data.TransactionHandler, logs []*vmcommon.LogEntry) error { + if mock.SaveLogsCalled != nil { + return mock.SaveLogsCalled(txHash, tx, logs) + } + return nil +} + +// Remove - +func (mock *FailedTxLogsAccumulatorMock) Remove(txHash []byte) { + if mock.RemoveCalled != nil { + mock.RemoveCalled(txHash) + } +} + +// IsInterfaceNil - +func (mock *FailedTxLogsAccumulatorMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/factory/mock/forkDetectorStub.go b/testscommon/processMocks/forkDetectorStub.go similarity index 94% rename from factory/mock/forkDetectorStub.go rename to testscommon/processMocks/forkDetectorStub.go index 640c7e3899f..80ddc4d2ebf 100644 --- a/factory/mock/forkDetectorStub.go +++ b/testscommon/processMocks/forkDetectorStub.go @@ -1,4 +1,4 @@ -package mock +package processMocks import ( "github.com/multiversx/mx-chain-core-go/data" @@ -28,7 +28,10 @@ func (fdm *ForkDetectorStub) RestoreToGenesis() { // AddHeader - func (fdm *ForkDetectorStub) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { - return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) + if fdm.AddHeaderCalled != nil { + return 
fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) + } + return nil } // RemoveHeader - diff --git a/testscommon/processMocks/relayedTxV3ProcessorMock.go b/testscommon/processMocks/relayedTxV3ProcessorMock.go new file mode 100644 index 00000000000..85af9584af5 --- /dev/null +++ b/testscommon/processMocks/relayedTxV3ProcessorMock.go @@ -0,0 +1,23 @@ +package processMocks + +import ( + "github.com/multiversx/mx-chain-core-go/data/transaction" +) + +// RelayedTxV3ProcessorMock - +type RelayedTxV3ProcessorMock struct { + CheckRelayedTxCalled func(tx *transaction.Transaction) error +} + +// CheckRelayedTx - +func (mock *RelayedTxV3ProcessorMock) CheckRelayedTx(tx *transaction.Transaction) error { + if mock.CheckRelayedTxCalled != nil { + return mock.CheckRelayedTxCalled(tx) + } + return nil +} + +// IsInterfaceNil - +func (mock *RelayedTxV3ProcessorMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/realConfigsHandling.go b/testscommon/realConfigsHandling.go index 024fe336b9f..e58b36923f8 100644 --- a/testscommon/realConfigsHandling.go +++ b/testscommon/realConfigsHandling.go @@ -5,60 +5,83 @@ import ( "os/exec" "path" "strings" - "testing" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" - "github.com/stretchr/testify/require" ) // CreateTestConfigs will try to copy the whole configs directory to a temp directory and return the configs after load // The copying of the configs is required because minor adjustments of their contents is required for the tests to pass -func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Configs { - tempDir := tb.TempDir() - +func CreateTestConfigs(tempDir string, originalConfigsPath string) (*config.Configs, error) { newConfigsPath := path.Join(tempDir, "config") // TODO refactor this cp to work on all OSes cmd := exec.Command("cp", "-r", originalConfigsPath, newConfigsPath) err := cmd.Run() - require.Nil(tb, err) + if err != nil { + return nil, err + } newGenesisSmartContractsFilename := path.Join(newConfigsPath, "genesisSmartContracts.json") - correctTestPathInGenesisSmartContracts(tb, tempDir, newGenesisSmartContractsFilename) + err = correctTestPathInGenesisSmartContracts(tempDir, newGenesisSmartContractsFilename) + if err != nil { + return nil, err + } apiConfig, err := common.LoadApiConfig(path.Join(newConfigsPath, "api.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } generalConfig, err := common.LoadMainConfig(path.Join(newConfigsPath, "config.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } ratingsConfig, err := common.LoadRatingsConfig(path.Join(newConfigsPath, "ratings.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } economicsConfig, err := common.LoadEconomicsConfig(path.Join(newConfigsPath, "economics.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } prefsConfig, err := common.LoadPreferencesConfig(path.Join(newConfigsPath, "prefs.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } mainP2PConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } fullArchiveP2PConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "fullArchiveP2P.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } externalConfig, err := common.LoadExternalConfig(path.Join(newConfigsPath, 
"external.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } systemSCConfig, err := common.LoadSystemSmartContractsConfig(path.Join(newConfigsPath, "systemSmartContractsConfig.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } epochConfig, err := common.LoadEpochConfig(path.Join(newConfigsPath, "enableEpochs.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } roundConfig, err := common.LoadRoundConfig(path.Join(newConfigsPath, "enableRounds.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } // make the node pass the network wait constraints mainP2PConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 @@ -91,12 +114,14 @@ func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Config }, EpochConfig: epochConfig, RoundConfig: roundConfig, - } + }, nil } -func correctTestPathInGenesisSmartContracts(tb testing.TB, tempDir string, newGenesisSmartContractsFilename string) { +func correctTestPathInGenesisSmartContracts(tempDir string, newGenesisSmartContractsFilename string) error { input, err := os.ReadFile(newGenesisSmartContractsFilename) - require.Nil(tb, err) + if err != nil { + return err + } lines := strings.Split(string(input), "\n") for i, line := range lines { @@ -105,6 +130,5 @@ func correctTestPathInGenesisSmartContracts(tb testing.TB, tempDir string, newGe } } output := strings.Join(lines, "\n") - err = os.WriteFile(newGenesisSmartContractsFilename, []byte(output), 0644) - require.Nil(tb, err) + return os.WriteFile(newGenesisSmartContractsFilename, []byte(output), 0644) } diff --git a/epochStart/mock/rewardsCreatorStub.go b/testscommon/rewardsCreatorStub.go similarity index 90% rename from epochStart/mock/rewardsCreatorStub.go rename to testscommon/rewardsCreatorStub.go index 9073048cca7..b9b0b2b0492 100644 --- a/epochStart/mock/rewardsCreatorStub.go +++ b/testscommon/rewardsCreatorStub.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( "math/big" @@ -12,10 +12,10 @@ import ( // RewardsCreatorStub - type RewardsCreatorStub struct { CreateRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) VerifyRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error GetProtocolSustainabilityRewardsCalled func() *big.Int GetLocalTxCacheCalled func() epochStart.TransactionCacher @@ -29,7 +29,7 @@ type RewardsCreatorStub struct { // CreateRewardsMiniBlocks - func (rcs *RewardsCreatorStub) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { if rcs.CreateRewardsMiniBlocksCalled != nil { @@ -42,7 +42,7 @@ func (rcs *RewardsCreatorStub) CreateRewardsMiniBlocks( // VerifyRewardsMiniBlocks - func (rcs *RewardsCreatorStub) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { if 
rcs.VerifyRewardsMiniBlocksCalled != nil { diff --git a/testscommon/roundHandlerMock.go b/testscommon/roundHandlerMock.go index 976e8a55181..6c5d45cc7bc 100644 --- a/testscommon/roundHandlerMock.go +++ b/testscommon/roundHandlerMock.go @@ -10,12 +10,13 @@ type RoundHandlerMock struct { indexMut sync.RWMutex index int64 - IndexCalled func() int64 - TimeDurationCalled func() time.Duration - TimeStampCalled func() time.Time - UpdateRoundCalled func(time.Time, time.Time) - RemainingTimeCalled func(startTime time.Time, maxTime time.Duration) time.Duration - BeforeGenesisCalled func() bool + IndexCalled func() int64 + TimeDurationCalled func() time.Duration + TimeStampCalled func() time.Time + UpdateRoundCalled func(time.Time, time.Time) + RemainingTimeCalled func(startTime time.Time, maxTime time.Duration) time.Duration + BeforeGenesisCalled func() bool + IncrementIndexCalled func() } // BeforeGenesis - @@ -77,6 +78,13 @@ func (rndm *RoundHandlerMock) RemainingTime(startTime time.Time, maxTime time.Du return 4000 * time.Millisecond } +// IncrementIndex - +func (rndm *RoundHandlerMock) IncrementIndex() { + if rndm.IncrementIndexCalled != nil { + rndm.IncrementIndexCalled() + } +} + // IsInterfaceNil returns true if there is no value under the interface func (rndm *RoundHandlerMock) IsInterfaceNil() bool { return rndm == nil diff --git a/consensus/mock/sentSignatureTrackerStub.go b/testscommon/sentSignatureTrackerStub.go similarity index 52% rename from consensus/mock/sentSignatureTrackerStub.go rename to testscommon/sentSignatureTrackerStub.go index f61bcf2e778..c051d0c60a7 100644 --- a/consensus/mock/sentSignatureTrackerStub.go +++ b/testscommon/sentSignatureTrackerStub.go @@ -1,10 +1,10 @@ -package mock +package testscommon // SentSignatureTrackerStub - type SentSignatureTrackerStub struct { - StartRoundCalled func() - SignatureSentCalled func(pkBytes []byte) - ReceivedActualSignersCalled func(signersPks []string) + StartRoundCalled func() + SignatureSentCalled func(pkBytes []byte) + ResetCountersForManagedBlockSignerCalled func(signerPk []byte) } // StartRound - @@ -21,10 +21,10 @@ func (stub *SentSignatureTrackerStub) SignatureSent(pkBytes []byte) { } } -// ReceivedActualSigners - -func (stub *SentSignatureTrackerStub) ReceivedActualSigners(signersPks []string) { - if stub.ReceivedActualSignersCalled != nil { - stub.ReceivedActualSignersCalled(signersPks) +// ResetCountersForManagedBlockSigner - +func (stub *SentSignatureTrackerStub) ResetCountersForManagedBlockSigner(signerPk []byte) { + if stub.ResetCountersForManagedBlockSignerCalled != nil { + stub.ResetCountersForManagedBlockSignerCalled(signerPk) } } diff --git a/testscommon/shardingMocks/nodesCoordRegistryFactoryMock.go b/testscommon/shardingMocks/nodesCoordRegistryFactoryMock.go new file mode 100644 index 00000000000..2ed51dc9188 --- /dev/null +++ b/testscommon/shardingMocks/nodesCoordRegistryFactoryMock.go @@ -0,0 +1,32 @@ +package shardingMocks + +import ( + "encoding/json" + + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) + +// NodesCoordinatorRegistryFactoryMock - +type NodesCoordinatorRegistryFactoryMock struct { +} + +// CreateNodesCoordinatorRegistry - +func (ncr *NodesCoordinatorRegistryFactoryMock) CreateNodesCoordinatorRegistry(buff []byte) (nodesCoordinator.NodesCoordinatorRegistryHandler, error) { + registry := &nodesCoordinator.NodesCoordinatorRegistry{} + err := json.Unmarshal(buff, registry) + if err != nil { + return nil, err + } + + return registry, nil +} + +// GetRegistryData - 
+func (ncr *NodesCoordinatorRegistryFactoryMock) GetRegistryData(registry nodesCoordinator.NodesCoordinatorRegistryHandler, _ uint32) ([]byte, error) { + return json.Marshal(registry) +} + +// IsInterfaceNil - +func (ncr *NodesCoordinatorRegistryFactoryMock) IsInterfaceNil() bool { + return ncr == nil +} diff --git a/testscommon/shardingMocks/nodesCoordinatorMock.go b/testscommon/shardingMocks/nodesCoordinatorMock.go index 8ea7177705b..9f1b872e2ab 100644 --- a/testscommon/shardingMocks/nodesCoordinatorMock.go +++ b/testscommon/shardingMocks/nodesCoordinatorMock.go @@ -5,27 +5,31 @@ import ( "fmt" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" ) // NodesCoordinatorMock defines the behaviour of a struct able to do validator group selection type NodesCoordinatorMock struct { - Validators map[uint32][]nodesCoordinator.Validator - ShardConsensusSize uint32 - MetaConsensusSize uint32 - ShardId uint32 - NbShards uint32 - GetSelectedPublicKeysCalled func(selection []byte, shardId uint32, epoch uint32) (publicKeys []string, err error) - GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) - GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) - SetNodesPerShardsCalled func(nodes map[uint32][]nodesCoordinator.Validator, epoch uint32) error - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) - GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) - GetAllEligibleValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) - GetAllWaitingValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) - ConsensusGroupSizeCalled func(uint32) int - GetValidatorsIndexesCalled func(publicKeys []string, epoch uint32) ([]uint64, error) + Validators map[uint32][]nodesCoordinator.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32, epoch uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) + SetNodesPerShardsCalled func(nodes map[uint32][]nodesCoordinator.Validator, epoch uint32) error + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) + GetAllEligibleValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) + GetAllWaitingValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) + ConsensusGroupSizeCalled func(uint32) int + GetValidatorsIndexesCalled func(publicKeys []string, epoch uint32) ([]uint64, error) + GetAllShuffledOutValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) + GetShuffledOutToAuctionValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) + GetNumTotalEligibleCalled func() uint64 } // 
NewNodesCoordinatorMock - @@ -78,6 +82,9 @@ func (ncm *NodesCoordinatorMock) GetAllLeavingValidatorsPublicKeys(_ uint32) (ma // GetNumTotalEligible - func (ncm *NodesCoordinatorMock) GetNumTotalEligible() uint64 { + if ncm.GetNumTotalEligibleCalled != nil { + return ncm.GetNumTotalEligibleCalled() + } return 1 } @@ -97,6 +104,23 @@ func (ncm *NodesCoordinatorMock) GetAllWaitingValidatorsPublicKeys(_ uint32) (ma return nil, nil } +// GetAllShuffledOutValidatorsPublicKeys - +func (ncm *NodesCoordinatorMock) GetAllShuffledOutValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + if ncm.GetAllShuffledOutValidatorsPublicKeysCalled != nil { + return ncm.GetAllShuffledOutValidatorsPublicKeysCalled(epoch) + } + return nil, nil +} + +// GetShuffledOutToAuctionValidatorsPublicKeys - +func (ncm *NodesCoordinatorMock) GetShuffledOutToAuctionValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + if ncm.GetShuffledOutToAuctionValidatorsPublicKeysCalled != nil { + return ncm.GetShuffledOutToAuctionValidatorsPublicKeysCalled(epoch) + } + + return nil, nil +} + // GetValidatorsIndexes - func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string, epoch uint32) ([]uint64, error) { if ncm.GetValidatorsIndexesCalled != nil { diff --git a/testscommon/shardingMocks/nodesCoordinatorStub.go b/testscommon/shardingMocks/nodesCoordinatorStub.go index a9d3aecf380..a142f0509ed 100644 --- a/testscommon/shardingMocks/nodesCoordinatorStub.go +++ b/testscommon/shardingMocks/nodesCoordinatorStub.go @@ -2,13 +2,13 @@ package shardingMocks import ( "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" ) // NodesCoordinatorStub - type NodesCoordinatorStub struct { - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]nodesCoordinator.Validator, error) GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) @@ -21,10 +21,11 @@ type NodesCoordinatorStub struct { GetConsensusWhitelistedNodesCalled func(epoch uint32) (map[string]struct{}, error) GetOwnPublicKeyCalled func() []byte GetWaitingEpochsLeftForPublicKeyCalled func(publicKey []byte) (uint32, error) + GetNumTotalEligibleCalled func() uint64 } // NodesCoordinatorToRegistry - -func (ncm *NodesCoordinatorStub) NodesCoordinatorToRegistry() *nodesCoordinator.NodesCoordinatorRegistry { +func (ncm *NodesCoordinatorStub) NodesCoordinatorToRegistry(uint32) nodesCoordinator.NodesCoordinatorRegistryHandler { return nil } @@ -51,7 +52,7 @@ func (ncm *NodesCoordinatorStub) GetAllLeavingValidatorsPublicKeys(_ uint32) (ma } // SetConfig - -func (ncm *NodesCoordinatorStub) SetConfig(_ *nodesCoordinator.NodesCoordinatorRegistry) error { +func (ncm *NodesCoordinatorStub) SetConfig(_ nodesCoordinator.NodesCoordinatorRegistryHandler) error { return nil } @@ -77,8 +78,21 @@ func (ncm *NodesCoordinatorStub) GetAllWaitingValidatorsPublicKeys(epoch uint32) return nil, nil } +// GetAllShuffledOutValidatorsPublicKeys - +func (ncm *NodesCoordinatorStub) GetAllShuffledOutValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { + return nil, nil +} + +// 
GetShuffledOutToAuctionValidatorsPublicKeys - +func (ncm *NodesCoordinatorStub) GetShuffledOutToAuctionValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { + return nil, nil +} + // GetNumTotalEligible - func (ncm *NodesCoordinatorStub) GetNumTotalEligible() uint64 { + if ncm.GetNumTotalEligibleCalled != nil { + return ncm.GetNumTotalEligibleCalled() + } return 1 } @@ -103,8 +117,8 @@ func (ncm *NodesCoordinatorStub) ComputeConsensusGroup( shardId uint32, epoch uint32, ) (validatorsGroup []nodesCoordinator.Validator, err error) { - if ncm.ComputeValidatorsGroupCalled != nil { - return ncm.ComputeValidatorsGroupCalled(randomness, round, shardId, epoch) + if ncm.ComputeConsensusGroupCalled != nil { + return ncm.ComputeConsensusGroupCalled(randomness, round, shardId, epoch) } var list []nodesCoordinator.Validator diff --git a/testscommon/simpleNFTStorageHandlerStub.go b/testscommon/simpleNFTStorageHandlerStub.go index 7e5fdcf1d8c..d12aabc9f4c 100644 --- a/testscommon/simpleNFTStorageHandlerStub.go +++ b/testscommon/simpleNFTStorageHandlerStub.go @@ -22,8 +22,8 @@ func (s *SimpleNFTStorageHandlerStub) GetESDTNFTTokenOnDestination(accnt vmcommo return &esdt.ESDigitalToken{Value: big.NewInt(0)}, true, nil } -// SaveNFTMetaDataToSystemAccount - -func (s *SimpleNFTStorageHandlerStub) SaveNFTMetaDataToSystemAccount(tx data.TransactionHandler) error { +// SaveNFTMetaData - +func (s *SimpleNFTStorageHandlerStub) SaveNFTMetaData(tx data.TransactionHandler) error { if s.SaveNFTMetaDataToSystemAccountCalled != nil { return s.SaveNFTMetaDataToSystemAccountCalled(tx) } diff --git a/testscommon/stakingcommon/auctionListSelectorStub.go b/testscommon/stakingcommon/auctionListSelectorStub.go new file mode 100644 index 00000000000..8cc24960c82 --- /dev/null +++ b/testscommon/stakingcommon/auctionListSelectorStub.go @@ -0,0 +1,25 @@ +package stakingcommon + +import "github.com/multiversx/mx-chain-go/state" + +// AuctionListSelectorStub - +type AuctionListSelectorStub struct { + SelectNodesFromAuctionListCalled func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error +} + +// SelectNodesFromAuctionList - +func (als *AuctionListSelectorStub) SelectNodesFromAuctionList( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, +) error { + if als.SelectNodesFromAuctionListCalled != nil { + return als.SelectNodesFromAuctionListCalled(validatorsInfoMap, randomness) + } + + return nil +} + +// IsInterfaceNil - +func (als *AuctionListSelectorStub) IsInterfaceNil() bool { + return als == nil +} diff --git a/testscommon/stakingcommon/stakingCommon.go b/testscommon/stakingcommon/stakingCommon.go new file mode 100644 index 00000000000..1af9b441b9c --- /dev/null +++ b/testscommon/stakingcommon/stakingCommon.go @@ -0,0 +1,333 @@ +package stakingcommon + +import ( + "math/big" + "strconv" + + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/genesis/process/disabled" + "github.com/multiversx/mx-chain-go/process" + economicsHandler "github.com/multiversx/mx-chain-go/process/economics" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/vm" + 
"github.com/multiversx/mx-chain-go/vm/systemSmartContracts" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("testscommon/stakingCommon") + +// RegisterValidatorKeys will register validator's staked key in the provided accounts db +func RegisterValidatorKeys( + accountsDB state.AccountsAdapter, + ownerAddress []byte, + rewardAddress []byte, + stakedKeys [][]byte, + totalStake *big.Int, + marshaller marshal.Marshalizer, +) { + AddValidatorData(accountsDB, ownerAddress, stakedKeys, totalStake, marshaller) + AddStakingData(accountsDB, ownerAddress, rewardAddress, stakedKeys, marshaller) + _, err := accountsDB.Commit() + log.LogIfError(err) +} + +// AddValidatorData will add the validator's registered keys in the provided accounts db +func AddValidatorData( + accountsDB state.AccountsAdapter, + ownerKey []byte, + registeredKeys [][]byte, + totalStake *big.Int, + marshaller marshal.Marshalizer, +) { + validatorSC := LoadUserAccount(accountsDB, vm.ValidatorSCAddress) + ownerStoredData, _, _ := validatorSC.RetrieveValue(ownerKey) + validatorData := &systemSmartContracts.ValidatorDataV2{} + if len(ownerStoredData) != 0 { + _ = marshaller.Unmarshal(validatorData, ownerStoredData) + validatorData.BlsPubKeys = append(validatorData.BlsPubKeys, registeredKeys...) + validatorData.TotalStakeValue = totalStake + } else { + validatorData = &systemSmartContracts.ValidatorDataV2{ + RegisterNonce: 0, + Epoch: 0, + RewardAddress: ownerKey, + TotalStakeValue: totalStake, + LockedStake: big.NewInt(0), + TotalUnstaked: big.NewInt(0), + BlsPubKeys: registeredKeys, + NumRegistered: uint32(len(registeredKeys)), + } + } + + marshaledData, _ := marshaller.Marshal(validatorData) + _ = validatorSC.SaveKeyValue(ownerKey, marshaledData) + + _ = accountsDB.SaveAccount(validatorSC) +} + +// AddStakingData will add the owner's staked keys in the provided accounts db +func AddStakingData( + accountsDB state.AccountsAdapter, + ownerAddress []byte, + rewardAddress []byte, + stakedKeys [][]byte, + marshaller marshal.Marshalizer, +) { + stakedData := &systemSmartContracts.StakedDataV2_0{ + Staked: true, + RewardAddress: rewardAddress, + OwnerAddress: ownerAddress, + StakeValue: big.NewInt(100), + } + marshaledData, _ := marshaller.Marshal(stakedData) + + stakingSCAcc := LoadUserAccount(accountsDB, vm.StakingSCAddress) + for _, key := range stakedKeys { + _ = stakingSCAcc.SaveKeyValue(key, marshaledData) + } + + _ = accountsDB.SaveAccount(stakingSCAcc) +} + +// AddKeysToWaitingList will add the owner's provided bls keys in the staking queue list +func AddKeysToWaitingList( + accountsDB state.AccountsAdapter, + waitingKeys [][]byte, + marshaller marshal.Marshalizer, + rewardAddress []byte, + ownerAddress []byte, +) { + if len(waitingKeys) == 0 { + return + } + + stakingSCAcc := LoadUserAccount(accountsDB, vm.StakingSCAddress) + waitingList := getWaitingList(stakingSCAcc, marshaller) + + waitingListAlreadyHasElements := waitingList.Length > 0 + waitingListLastKeyBeforeAddingNewKeys := waitingList.LastKey + previousKey := waitingList.LastKey + if !waitingListAlreadyHasElements { + waitingList.FirstKey = getPrefixedWaitingKey(waitingKeys[0]) + previousKey = waitingList.FirstKey + } + + numWaitingKeys := len(waitingKeys) + waitingList.LastKey = getPrefixedWaitingKey(waitingKeys[numWaitingKeys-1]) + waitingList.Length += uint32(numWaitingKeys) + saveWaitingList(stakingSCAcc, marshaller, waitingList) + + for i, waitingKey := range waitingKeys { + waitingListElement 
:= &systemSmartContracts.ElementInList{ + BLSPublicKey: waitingKey, + PreviousKey: previousKey, + NextKey: make([]byte, 0), + } + + if i < numWaitingKeys-1 { + nextKey := getPrefixedWaitingKey(waitingKeys[i+1]) + waitingListElement.NextKey = nextKey + } + + prefixedWaitingKey := getPrefixedWaitingKey(waitingKey) + saveStakedWaitingKey(stakingSCAcc, marshaller, rewardAddress, ownerAddress, waitingKey) + saveElemInList(stakingSCAcc, marshaller, waitingListElement, prefixedWaitingKey) + + previousKey = prefixedWaitingKey + } + + if waitingListAlreadyHasElements { + lastElem, _ := GetWaitingListElement(stakingSCAcc, marshaller, waitingListLastKeyBeforeAddingNewKeys) + lastElem.NextKey = getPrefixedWaitingKey(waitingKeys[0]) + saveElemInList(stakingSCAcc, marshaller, lastElem, waitingListLastKeyBeforeAddingNewKeys) + } + + _ = accountsDB.SaveAccount(stakingSCAcc) +} + +func getWaitingList( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, +) *systemSmartContracts.WaitingList { + marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) + waitingList := &systemSmartContracts.WaitingList{} + _ = marshaller.Unmarshal(waitingList, marshaledData) + + return waitingList +} + +func saveWaitingList( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + waitingList *systemSmartContracts.WaitingList, +) { + marshaledData, _ := marshaller.Marshal(waitingList) + _ = stakingSCAcc.SaveKeyValue([]byte("waitingList"), marshaledData) +} + +func getPrefixedWaitingKey(key []byte) []byte { + return []byte("w_" + string(key)) +} + +func saveStakedWaitingKey( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + rewardAddress []byte, + ownerAddress []byte, + key []byte, +) { + stakedData := &systemSmartContracts.StakedDataV2_0{ + Waiting: true, + RewardAddress: rewardAddress, + OwnerAddress: ownerAddress, + StakeValue: big.NewInt(100), + } + + marshaledData, _ := marshaller.Marshal(stakedData) + _ = stakingSCAcc.SaveKeyValue(key, marshaledData) +} + +func saveElemInList( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + elem *systemSmartContracts.ElementInList, + key []byte, +) { + marshaledData, _ := marshaller.Marshal(elem) + _ = stakingSCAcc.SaveKeyValue(key, marshaledData) +} + +// GetWaitingListElement returns the element in waiting list saved at the provided key +func GetWaitingListElement( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + key []byte, +) (*systemSmartContracts.ElementInList, error) { + marshaledData, _, _ := stakingSCAcc.RetrieveValue(key) + if len(marshaledData) == 0 { + return nil, vm.ErrElementNotFound + } + + element := &systemSmartContracts.ElementInList{} + err := marshaller.Unmarshal(element, marshaledData) + if err != nil { + return nil, err + } + + return element, nil +} + +// LoadUserAccount returns address's state.UserAccountHandler from the provided db +func LoadUserAccount(accountsDB state.AccountsAdapter, address []byte) state.UserAccountHandler { + acc, _ := accountsDB.LoadAccount(address) + return acc.(state.UserAccountHandler) +} + +// CreateEconomicsData returns an initialized process.EconomicsDataHandler +func CreateEconomicsData() process.EconomicsDataHandler { + maxGasLimitPerBlock := strconv.FormatUint(1500000000, 10) + minGasPrice := strconv.FormatUint(10, 10) + minGasLimit := strconv.FormatUint(10, 10) + + argsNewEconomicsData := economicsHandler.ArgsNewEconomicsData{ + Economics: &config.EconomicsConfig{ + GlobalSettings: 
config.GlobalSettings{ + GenesisTotalSupply: "2000000000000000000000", + MinimumInflation: 0, + YearSettings: []*config.YearSetting{ + { + Year: 0, + MaximumInflation: 0.01, + }, + }, + }, + RewardsSettings: config.RewardsSettings{ + RewardsConfigByEpoch: []config.EpochRewardSettings{ + { + LeaderPercentage: 0.1, + DeveloperPercentage: 0.1, + ProtocolSustainabilityPercentage: 0.1, + ProtocolSustainabilityAddress: "protocol", + TopUpGradientPoint: "300000000000000000000", + TopUpFactor: 0.25, + }, + }, + }, + FeeSettings: config.FeeSettings{ + GasLimitSettings: []config.GasLimitSetting{ + { + MaxGasLimitPerBlock: maxGasLimitPerBlock, + MaxGasLimitPerMiniBlock: maxGasLimitPerBlock, + MaxGasLimitPerMetaBlock: maxGasLimitPerBlock, + MaxGasLimitPerMetaMiniBlock: maxGasLimitPerBlock, + MaxGasLimitPerTx: maxGasLimitPerBlock, + MinGasLimit: minGasLimit, + ExtraGasLimitGuardedTx: maxGasLimitPerBlock, + }, + }, + MinGasPrice: minGasPrice, + GasPerDataByte: "1", + GasPriceModifier: 1.0, + MaxGasPriceSetGuardian: minGasPrice, + }, + }, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + TxVersionChecker: &disabled.TxVersionChecker{}, + } + economicsData, _ := economicsHandler.NewEconomicsData(argsNewEconomicsData) + return economicsData +} + +// SaveNodesConfig saves the nodes config in accounts db under "nodesConfig" key with provided params +func SaveNodesConfig( + accountsDB state.AccountsAdapter, + marshaller marshal.Marshalizer, + stakedNodes, + minNumNodes, + maxNumNodes int64, +) { + nodesConfigData := &systemSmartContracts.StakingNodesConfig{ + StakedNodes: stakedNodes, + MinNumNodes: minNumNodes, + MaxNumNodes: maxNumNodes, + } + nodesDataBytes, err := marshaller.Marshal(nodesConfigData) + log.LogIfError(err) + + account, err := accountsDB.LoadAccount(vm.StakingSCAddress) + log.LogIfError(err) + + userAccount, _ := account.(state.UserAccountHandler) + err = userAccount.SaveKeyValue([]byte("nodesConfig"), nodesDataBytes) + log.LogIfError(err) + err = accountsDB.SaveAccount(account) + log.LogIfError(err) + _, err = accountsDB.Commit() + log.LogIfError(err) +} + +// SaveDelegationManagerConfig will save a mock configuration for the delegation manager SC +func SaveDelegationManagerConfig(accountsDB state.AccountsAdapter, marshaller marshal.Marshalizer) { + managementData := &systemSmartContracts.DelegationManagement{ + MinDeposit: big.NewInt(100), + LastAddress: vm.FirstDelegationSCAddress, + MinDelegationAmount: big.NewInt(1), + } + marshaledData, err := marshaller.Marshal(managementData) + log.LogIfError(err) + + acc, err := accountsDB.LoadAccount(vm.DelegationManagerSCAddress) + log.LogIfError(err) + delegationAcc, _ := acc.(state.UserAccountHandler) + + err = delegationAcc.SaveKeyValue([]byte("delegationManagement"), marshaledData) + log.LogIfError(err) + err = accountsDB.SaveAccount(delegationAcc) + log.LogIfError(err) + _, err = accountsDB.Commit() + log.LogIfError(err) +} diff --git a/testscommon/stakingcommon/stakingDataProviderStub.go b/testscommon/stakingcommon/stakingDataProviderStub.go new file mode 100644 index 00000000000..27ec1a550e2 --- /dev/null +++ b/testscommon/stakingcommon/stakingDataProviderStub.go @@ -0,0 +1,115 @@ +package stakingcommon + +import ( + "math/big" + + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" +) + +// StakingDataProviderStub - +type StakingDataProviderStub struct { + CleanCalled func() + 
PrepareStakingDataCalled func(validatorsMap state.ShardValidatorsInfoMapHandler) error + GetTotalStakeEligibleNodesCalled func() *big.Int + GetTotalTopUpStakeEligibleNodesCalled func() *big.Int + GetNodeStakedTopUpCalled func(blsKey []byte) (*big.Int, error) + FillValidatorInfoCalled func(validator state.ValidatorInfoHandler) error + ComputeUnQualifiedNodesCalled func(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) + GetBlsKeyOwnerCalled func(blsKey []byte) (string, error) + GetOwnersDataCalled func() map[string]*epochStart.OwnerData +} + +// FillValidatorInfo - +func (sdps *StakingDataProviderStub) FillValidatorInfo(validator state.ValidatorInfoHandler) error { + if sdps.FillValidatorInfoCalled != nil { + return sdps.FillValidatorInfoCalled(validator) + } + return nil +} + +// ComputeUnQualifiedNodes - +func (sdps *StakingDataProviderStub) ComputeUnQualifiedNodes(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { + if sdps.ComputeUnQualifiedNodesCalled != nil { + return sdps.ComputeUnQualifiedNodesCalled(validatorInfos) + } + return nil, nil, nil +} + +// GetTotalStakeEligibleNodes - +func (sdps *StakingDataProviderStub) GetTotalStakeEligibleNodes() *big.Int { + if sdps.GetTotalStakeEligibleNodesCalled != nil { + return sdps.GetTotalStakeEligibleNodesCalled() + } + return big.NewInt(0) +} + +// GetTotalTopUpStakeEligibleNodes - +func (sdps *StakingDataProviderStub) GetTotalTopUpStakeEligibleNodes() *big.Int { + if sdps.GetTotalTopUpStakeEligibleNodesCalled != nil { + return sdps.GetTotalTopUpStakeEligibleNodesCalled() + } + return big.NewInt(0) +} + +// GetNodeStakedTopUp - +func (sdps *StakingDataProviderStub) GetNodeStakedTopUp(blsKey []byte) (*big.Int, error) { + if sdps.GetNodeStakedTopUpCalled != nil { + return sdps.GetNodeStakedTopUpCalled(blsKey) + } + return big.NewInt(0), nil +} + +// PrepareStakingData - +func (sdps *StakingDataProviderStub) PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler) error { + if sdps.PrepareStakingDataCalled != nil { + return sdps.PrepareStakingDataCalled(validatorsMap) + } + return nil +} + +// Clean - +func (sdps *StakingDataProviderStub) Clean() { + if sdps.CleanCalled != nil { + sdps.CleanCalled() + } +} + +// GetBlsKeyOwner - +func (sdps *StakingDataProviderStub) GetBlsKeyOwner(blsKey []byte) (string, error) { + if sdps.GetBlsKeyOwnerCalled != nil { + return sdps.GetBlsKeyOwnerCalled(blsKey) + } + return "", nil +} + +// GetNumOfValidatorsInCurrentEpoch - +func (sdps *StakingDataProviderStub) GetNumOfValidatorsInCurrentEpoch() uint32 { + return 0 +} + +// GetCurrentEpochValidatorStats - +func (sdps *StakingDataProviderStub) GetCurrentEpochValidatorStats() epochStart.ValidatorStatsInEpoch { + return epochStart.ValidatorStatsInEpoch{ + Eligible: map[uint32]int{}, + Waiting: map[uint32]int{}, + Leaving: map[uint32]int{}, + } +} + +// GetOwnersData - +func (sdps *StakingDataProviderStub) GetOwnersData() map[string]*epochStart.OwnerData { + if sdps.GetOwnersDataCalled != nil { + return sdps.GetOwnersDataCalled() + } + return nil +} + +// EpochConfirmed - +func (sdps *StakingDataProviderStub) EpochConfirmed(uint32, uint64) { +} + +// IsInterfaceNil - +func (sdps *StakingDataProviderStub) IsInterfaceNil() bool { + return sdps == nil +} diff --git a/node/mock/validatorsProviderStub.go b/testscommon/stakingcommon/validatorsProviderStub.go similarity index 51% rename from node/mock/validatorsProviderStub.go rename to 
testscommon/stakingcommon/validatorsProviderStub.go index 98ea652340b..0db49b4fde8 100644 --- a/node/mock/validatorsProviderStub.go +++ b/testscommon/stakingcommon/validatorsProviderStub.go @@ -1,12 +1,15 @@ -package mock +package stakingcommon import ( "github.com/multiversx/mx-chain-core-go/data/validator" + "github.com/multiversx/mx-chain-go/common" ) // ValidatorsProviderStub - type ValidatorsProviderStub struct { GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics + GetAuctionListCalled func() ([]*common.AuctionListValidatorAPIResponse, error) + ForceUpdateCalled func() error } // GetLatestValidators - @@ -14,6 +17,25 @@ func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.Va if vp.GetLatestValidatorsCalled != nil { return vp.GetLatestValidatorsCalled() } + + return nil +} + +// GetAuctionList - +func (vp *ValidatorsProviderStub) GetAuctionList() ([]*common.AuctionListValidatorAPIResponse, error) { + if vp.GetAuctionListCalled != nil { + return vp.GetAuctionListCalled() + } + + return nil, nil +} + +// ForceUpdate - +func (vp *ValidatorsProviderStub) ForceUpdate() error { + if vp.ForceUpdateCalled != nil { + return vp.ForceUpdateCalled() + } + return nil } diff --git a/testscommon/state/accountAdapterStub.go b/testscommon/state/accountAdapterStub.go index 433722f7e21..fa9305f8222 100644 --- a/testscommon/state/accountAdapterStub.go +++ b/testscommon/state/accountAdapterStub.go @@ -177,14 +177,14 @@ func (aas *StateUserAccountHandlerStub) ClaimDeveloperRewards(senderAddr []byte) return nil, nil } -//AddToDeveloperReward - +// AddToDeveloperReward - func (aas *StateUserAccountHandlerStub) AddToDeveloperReward(val *big.Int) { if aas.AddToDeveloperRewardCalled != nil { aas.AddToDeveloperRewardCalled(val) } } -//GetDeveloperReward - +// GetDeveloperReward - func (aas *StateUserAccountHandlerStub) GetDeveloperReward() *big.Int { if aas.GetDeveloperRewardCalled != nil { return aas.GetDeveloperRewardCalled() @@ -230,7 +230,7 @@ func (aas *StateUserAccountHandlerStub) GetUserName() []byte { return nil } -//IsGuarded - +// IsGuarded - func (aas *StateUserAccountHandlerStub) IsGuarded() bool { if aas.IsGuardedCalled != nil { return aas.IsGuardedCalled() diff --git a/testscommon/state/accountWrapperMock.go b/testscommon/state/accountWrapperMock.go index 9cbac29d8ce..8f5e794646a 100644 --- a/testscommon/state/accountWrapperMock.go +++ b/testscommon/state/accountWrapperMock.go @@ -205,7 +205,7 @@ func (awm *AccountWrapMock) SetDataTrie(trie common.Trie) { awm.trackableDataTrie.SetDataTrie(trie) } -//IncreaseNonce adds the given value to the current nonce +// IncreaseNonce adds the given value to the current nonce func (awm *AccountWrapMock) IncreaseNonce(val uint64) { awm.nonce = awm.nonce + val } diff --git a/testscommon/state/accountsAdapterStub.go b/testscommon/state/accountsAdapterStub.go index abb1788a076..034d1f6ae26 100644 --- a/testscommon/state/accountsAdapterStub.go +++ b/testscommon/state/accountsAdapterStub.go @@ -23,8 +23,7 @@ type AccountsStub struct { JournalLenCalled func() int RevertToSnapshotCalled func(snapshot int) error RootHashCalled func() ([]byte, error) - RecreateTrieCalled func(rootHash []byte) error - RecreateTrieFromEpochCalled func(options common.RootHashHolder) error + RecreateTrieCalled func(options common.RootHashHolder) error PruneTrieCalled func(rootHash []byte, identifier state.TriePruningIdentifier, handler state.PruningHandler) CancelPruneCalled func(rootHash []byte, identifier 
state.TriePruningIdentifier) SnapshotStateCalled func(rootHash []byte, epoch uint32) @@ -176,18 +175,9 @@ func (as *AccountsStub) RootHash() ([]byte, error) { } // RecreateTrie - -func (as *AccountsStub) RecreateTrie(rootHash []byte) error { +func (as *AccountsStub) RecreateTrie(options common.RootHashHolder) error { if as.RecreateTrieCalled != nil { - return as.RecreateTrieCalled(rootHash) - } - - return errNotImplemented -} - -// RecreateTrieFromEpoch - -func (as *AccountsStub) RecreateTrieFromEpoch(options common.RootHashHolder) error { - if as.RecreateTrieFromEpochCalled != nil { - return as.RecreateTrieFromEpochCalled(options) + return as.RecreateTrieCalled(options) } return errNotImplemented diff --git a/testscommon/state/peerAccountHandlerMock.go b/testscommon/state/peerAccountHandlerMock.go index b3283be1280..870836cc00d 100644 --- a/testscommon/state/peerAccountHandlerMock.go +++ b/testscommon/state/peerAccountHandlerMock.go @@ -14,6 +14,7 @@ type PeerAccountHandlerMock struct { IncreaseValidatorSuccessRateValue uint32 DecreaseValidatorSuccessRateValue uint32 IncreaseValidatorIgnoredSignaturesValue uint32 + PreviousList string IncreaseLeaderSuccessRateCalled func(uint32) DecreaseLeaderSuccessRateCalled func(uint32) @@ -52,11 +53,26 @@ func (p *PeerAccountHandlerMock) GetList() string { return "" } +// GetPreviousList - +func (p *PeerAccountHandlerMock) GetPreviousList() string { + return "" +} + // GetIndexInList - func (p *PeerAccountHandlerMock) GetIndexInList() uint32 { return 0 } +// GetPreviousIndexInList - +func (p *PeerAccountHandlerMock) GetPreviousIndexInList() uint32 { + return 0 +} + +// GetBLSPublicKey - +func (p *PeerAccountHandlerMock) GetBLSPublicKey() []byte { + return nil +} + // SetBLSPublicKey - func (p *PeerAccountHandlerMock) SetBLSPublicKey([]byte) error { return nil @@ -290,13 +306,18 @@ func (p *PeerAccountHandlerMock) SetConsecutiveProposerMisses(consecutiveMisses } // SetListAndIndex - -func (p *PeerAccountHandlerMock) SetListAndIndex(shardID uint32, list string, index uint32) { +func (p *PeerAccountHandlerMock) SetListAndIndex(shardID uint32, list string, index uint32, _ bool) { if p.SetListAndIndexCalled != nil { p.SetListAndIndexCalled(shardID, list, index) } } +// SetPreviousList - +func (p *PeerAccountHandlerMock) SetPreviousList(list string) { + p.PreviousList = list +} + // IsInterfaceNil - func (p *PeerAccountHandlerMock) IsInterfaceNil() bool { - return false + return p == nil } diff --git a/testscommon/state/userAccountStub.go b/testscommon/state/userAccountStub.go index 3e4278b2d38..ce54f059252 100644 --- a/testscommon/state/userAccountStub.go +++ b/testscommon/state/userAccountStub.go @@ -30,6 +30,7 @@ type UserAccountStub struct { RetrieveValueCalled func(_ []byte) ([]byte, uint32, error) SetDataTrieCalled func(dataTrie common.Trie) GetRootHashCalled func() []byte + SaveKeyValueCalled func(key []byte, value []byte) error } // HasNewCode - @@ -172,7 +173,10 @@ func (u *UserAccountStub) RetrieveValue(key []byte) ([]byte, uint32, error) { } // SaveKeyValue - -func (u *UserAccountStub) SaveKeyValue(_ []byte, _ []byte) error { +func (u *UserAccountStub) SaveKeyValue(key []byte, value []byte) error { + if u.SaveKeyValueCalled != nil { + return u.SaveKeyValueCalled(key, value) + } return nil } diff --git a/testscommon/stateStatisticsHandlerStub.go b/testscommon/stateStatisticsHandlerStub.go new file mode 100644 index 00000000000..bc13bea90d4 --- /dev/null +++ b/testscommon/stateStatisticsHandlerStub.go @@ -0,0 +1,136 @@ +package testscommon + +// 
StateStatisticsHandlerStub - +type StateStatisticsHandlerStub struct { + ResetCalled func() + ResetSnapshotCalled func() + IncrementCacheCalled func() + CacheCalled func() uint64 + IncrementSnapshotCacheCalled func() + SnapshotCacheCalled func() uint64 + IncrementPersisterCalled func(epoch uint32) + PersisterCalled func(epoch uint32) uint64 + IncrementSnapshotPersisterCalled func(epoch uint32) + SnapshotPersisterCalled func(epoch uint32) uint64 + IncrementTrieCalled func() + TrieCalled func() uint64 + ProcessingStatsCalled func() []string + SnapshotStatsCalled func() []string +} + +// Reset - +func (stub *StateStatisticsHandlerStub) Reset() { + if stub.ResetCalled != nil { + stub.ResetCalled() + } +} + +// ResetSnapshot - +func (stub *StateStatisticsHandlerStub) ResetSnapshot() { + if stub.ResetSnapshotCalled != nil { + stub.ResetSnapshotCalled() + } +} + +// IncrementCache - +func (stub *StateStatisticsHandlerStub) IncrementCache() { + if stub.IncrementCacheCalled != nil { + stub.IncrementCacheCalled() + } +} + +// Cache - +func (stub *StateStatisticsHandlerStub) Cache() uint64 { + if stub.CacheCalled != nil { + return stub.CacheCalled() + } + + return 0 +} + +// IncrementSnapshotCache - +func (stub *StateStatisticsHandlerStub) IncrementSnapshotCache() { + if stub.IncrementSnapshotCacheCalled != nil { + stub.IncrementSnapshotCacheCalled() + } +} + +// SnapshotCache - +func (stub *StateStatisticsHandlerStub) SnapshotCache() uint64 { + if stub.SnapshotCacheCalled != nil { + return stub.SnapshotCacheCalled() + } + + return 0 +} + +// IncrementPersister - +func (stub *StateStatisticsHandlerStub) IncrementPersister(epoch uint32) { + if stub.IncrementPersisterCalled != nil { + stub.IncrementPersisterCalled(epoch) + } +} + +// Persister - +func (stub *StateStatisticsHandlerStub) Persister(epoch uint32) uint64 { + if stub.PersisterCalled != nil { + return stub.PersisterCalled(epoch) + } + + return 0 +} + +// IncrementSnapshotPersister - +func (stub *StateStatisticsHandlerStub) IncrementSnapshotPersister(epoch uint32) { + if stub.IncrementSnapshotPersisterCalled != nil { + stub.IncrementSnapshotPersisterCalled(epoch) + } +} + +// SnapshotPersister - +func (stub *StateStatisticsHandlerStub) SnapshotPersister(epoch uint32) uint64 { + if stub.SnapshotPersisterCalled != nil { + return stub.SnapshotPersisterCalled(epoch) + } + + return 0 +} + +// IncrementTrie - +func (stub *StateStatisticsHandlerStub) IncrementTrie() { + if stub.IncrementTrieCalled != nil { + stub.IncrementTrieCalled() + } +} + +// Trie - +func (stub *StateStatisticsHandlerStub) Trie() uint64 { + if stub.TrieCalled != nil { + return stub.TrieCalled() + } + + return 0 +} + +// ProcessingStats - +func (stub *StateStatisticsHandlerStub) ProcessingStats() []string { + if stub.ProcessingStatsCalled != nil { + return stub.ProcessingStatsCalled() + } + + return make([]string, 0) +} + +// SnapshotStats - +func (stub *StateStatisticsHandlerStub) SnapshotStats() []string { + if stub.SnapshotStatsCalled != nil { + return stub.SnapshotStatsCalled() + } + + return make([]string, 0) +} + +// IsInterfaceNil - +func (stub *StateStatisticsHandlerStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/storageManager/storageManagerStub.go b/testscommon/storageManager/storageManagerStub.go index b14d6c460a6..60e10541da6 100644 --- a/testscommon/storageManager/storageManagerStub.go +++ b/testscommon/storageManager/storageManagerStub.go @@ -7,30 +7,30 @@ import ( // StorageManagerStub - type StorageManagerStub struct { - PutCalled 
func([]byte, []byte) error - PutInEpochCalled func([]byte, []byte, uint32) error - PutInEpochWithoutCacheCalled func([]byte, []byte, uint32) error - GetCalled func([]byte) ([]byte, error) - GetFromCurrentEpochCalled func([]byte) ([]byte, error) - TakeSnapshotCalled func(string, []byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler, uint32) - GetDbThatContainsHashCalled func([]byte) common.BaseStorer - IsPruningEnabledCalled func() bool - IsPruningBlockedCalled func() bool - EnterPruningBufferingModeCalled func() - ExitPruningBufferingModeCalled func() - RemoveFromCurrentEpochCalled func([]byte) error - RemoveCalled func([]byte) error - IsInterfaceNilCalled func() bool - SetEpochForPutOperationCalled func(uint32) - ShouldTakeSnapshotCalled func() bool - GetLatestStorageEpochCalled func() (uint32, error) - IsClosedCalled func() bool - GetBaseTrieStorageManagerCalled func() common.StorageManager - GetIdentifierCalled func() string - CloseCalled func() error - RemoveFromAllActiveEpochsCalled func(hash []byte) error - IsSnapshotSupportedCalled func() bool - GetStateStatsHandlerCalled func() common.StateStatisticsHandler + PutCalled func([]byte, []byte) error + PutInEpochCalled func([]byte, []byte, uint32) error + PutInEpochWithoutCacheCalled func([]byte, []byte, uint32) error + GetCalled func([]byte) ([]byte, error) + GetFromCurrentEpochCalled func([]byte) ([]byte, error) + TakeSnapshotCalled func(string, []byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler, uint32) + GetDbThatContainsHashCalled func([]byte) common.BaseStorer + IsPruningEnabledCalled func() bool + IsPruningBlockedCalled func() bool + EnterPruningBufferingModeCalled func() + ExitPruningBufferingModeCalled func() + RemoveFromCurrentEpochCalled func([]byte) error + RemoveCalled func([]byte) error + IsInterfaceNilCalled func() bool + SetEpochForPutOperationCalled func(uint32) + ShouldTakeSnapshotCalled func() bool + GetLatestStorageEpochCalled func() (uint32, error) + IsClosedCalled func() bool + GetBaseTrieStorageManagerCalled func() common.StorageManager + GetIdentifierCalled func() string + CloseCalled func() error + RemoveFromAllActiveEpochsCalled func(hash []byte) error + IsSnapshotSupportedCalled func() bool + GetStateStatsHandlerCalled func() common.StateStatisticsHandler } // Put - diff --git a/testscommon/tableDisplayerMock.go b/testscommon/tableDisplayerMock.go new file mode 100644 index 00000000000..813c3e11fc5 --- /dev/null +++ b/testscommon/tableDisplayerMock.go @@ -0,0 +1,19 @@ +package testscommon + +import "github.com/multiversx/mx-chain-core-go/display" + +// TableDisplayerMock - +type TableDisplayerMock struct { + DisplayTableCalled func(tableHeader []string, lines []*display.LineData, message string) +} + +// DisplayTable - +func (mock *TableDisplayerMock) DisplayTable(tableHeader []string, lines []*display.LineData, message string) { + if mock.DisplayTableCalled != nil { + mock.DisplayTableCalled(tableHeader, lines, message) + } +} + +func (mock *TableDisplayerMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/testConfigs.go b/testscommon/testConfigs.go new file mode 100644 index 00000000000..fc0840e5237 --- /dev/null +++ b/testscommon/testConfigs.go @@ -0,0 +1,36 @@ +package testscommon + +import "github.com/multiversx/mx-chain-go/config" + +// GetDefaultRoundsConfig - +func GetDefaultRoundsConfig() config.RoundConfig { + return config.RoundConfig{ + RoundActivations: 
map[string]config.ActivationRoundByName{ + "DisableAsyncCallV1": { + Round: "18446744073709551615", + }, + }, + } +} + +// GetDefaultHeaderVersionConfig - +func GetDefaultHeaderVersionConfig() config.VersionsConfig { + return config.VersionsConfig{ + DefaultVersion: "default", + VersionsByEpochs: []config.VersionByEpochs{ + { + StartEpoch: 0, + Version: "*", + }, + { + StartEpoch: 1, + Version: "2", + }, + }, + Cache: config.CacheConfig{ + Name: "VersionsCache", + Type: "LRU", + Capacity: 100, + }, + } +} diff --git a/testscommon/toml/config.go b/testscommon/toml/config.go new file mode 100644 index 00000000000..56cfeb1f0ad --- /dev/null +++ b/testscommon/toml/config.go @@ -0,0 +1,182 @@ +package toml + +// Config will hold the testing configuration parameters +type Config struct { + TestConfigI8 + TestConfigI16 + TestConfigI32 + TestConfigI64 + TestConfigU8 + TestConfigU16 + TestConfigU32 + TestConfigU64 + TestConfigF32 + TestConfigF64 + TestConfigStruct + TestConfigNestedStruct + TestMap + TestInterface +} + +// TestConfigI8 will hold an int8 value for testing +type TestConfigI8 struct { + Int8 Int8 +} + +// Int8 will hold the value +type Int8 struct { + Number int8 +} + +// TestConfigI16 will hold an int16 value for testing +type TestConfigI16 struct { + Int16 +} + +// Int16 will hold the value +type Int16 struct { + Number int16 +} + +// TestConfigI32 will hold an int32 value for testing +type TestConfigI32 struct { + Int32 +} + +// Int32 will hold the value +type Int32 struct { + Number int32 +} + +// TestConfigI64 will hold an int64 value for testing +type TestConfigI64 struct { + Int64 +} + +// Int64 will hold the value +type Int64 struct { + Number int64 +} + +// TestConfigU8 will hold an uint8 value for testing +type TestConfigU8 struct { + Uint8 +} + +// Uint8 will hold the value +type Uint8 struct { + Number uint8 +} + +// TestConfigU16 will hold an uint16 value for testing +type TestConfigU16 struct { + Uint16 +} + +// Uint16 will hold the value +type Uint16 struct { + Number uint16 +} + +// TestConfigU32 will hold an uint32 value for testing +type TestConfigU32 struct { + Uint32 +} + +// Uint32 will hold the value +type Uint32 struct { + Number uint32 +} + +// TestConfigU64 will hold an uint64 value for testing +type TestConfigU64 struct { + Uint64 +} + +// Uint64 will hold the value +type Uint64 struct { + Number uint64 +} + +// TestConfigF32 will hold a float32 value for testing +type TestConfigF32 struct { + Float32 +} + +// Float32 will hold the value +type Float32 struct { + Number float32 +} + +// TestConfigF64 will hold a float64 value for testing +type TestConfigF64 struct { + Float64 +} + +// Float64 will hold the value +type Float64 struct { + Number float64 +} + +// TestConfigStruct will hold a configuration struct for testing +type TestConfigStruct struct { + ConfigStruct +} + +// ConfigStruct will hold a struct for testing +type ConfigStruct struct { + Title string + Description +} + +// Description will hold the number +type Description struct { + Number uint32 +} + +// TestConfigNestedStruct will hold a configuration with nested struct for testing +type TestConfigNestedStruct struct { + ConfigNestedStruct +} + +// ConfigNestedStruct will hold a nested struct for testing +type ConfigNestedStruct struct { + Text string + Message +} + +// Message will hold some details +type Message struct { + Public bool + MessageDescription []MessageDescription +} + +// MessageDescription will hold the text +type MessageDescription struct { + Text string +} + +// 
MessageDescriptionOtherType will hold the text as integer +type MessageDescriptionOtherType struct { + Text int +} + +// MessageDescriptionOtherName will hold the value +type MessageDescriptionOtherName struct { + Value string +} + +// TestMap will hold a map for testing +type TestMap struct { + Map map[string]MapValues +} + +// MapValues will hold a value for map +type MapValues struct { + Number int +} + +// TestInterface will hold an interface for testing +type TestInterface struct { + Value interface{} +} diff --git a/testscommon/toml/config.toml b/testscommon/toml/config.toml new file mode 100644 index 00000000000..91512d5e664 --- /dev/null +++ b/testscommon/toml/config.toml @@ -0,0 +1,53 @@ +[TestConfigI8] + [TestConfigI8.Int8] + Value = -8 + +[TestConfigI16] + [TestConfigI16.Int16] + Value = -16 + +[TestConfigI32] + [TestConfigI8.Int32] + Value = -32 + +[TestConfigI64] + [TestConfigI64.Int64] + Value = -64 + +[TestConfigU8] + [TestConfigU8.Uint8] + Value = 8 + +[TestConfigU16] + [TestConfigU16.Uint16] + Value = 16 + +[TestConfigU32] + [TestConfigU32.Uint32] + Value = 32 + +[TestConfigU64] + [TestConfigU64.Uint64] + Value = 64 + +[TestConfigF32] + [TestConfigF32.Float32] + Value = -32.32 + +[TestConfigF64] + [TestConfigF64.Float64] + Value = 64.64 + +[TestConfigStruct] + [TestConfigStruct.ConfigStruct] + Title = "Config Struct" + Description = { Number = 32 } + +[TestConfigNestedStruct] + [TestConfigNestedStruct.ConfigNestedStruct] + Text = "Config Nested Struct" + Mesage = { Public = true, MessageDescription = [{ Text = "Text1" }, { Text = "Text2"}] } + +[Map] + [Map.Key1] + Number = 999 diff --git a/testscommon/toml/overwrite.toml b/testscommon/toml/overwrite.toml new file mode 100644 index 00000000000..63f74b7828c --- /dev/null +++ b/testscommon/toml/overwrite.toml @@ -0,0 +1,40 @@ +OverridableConfigTomlValues = [ + { File = "config.toml", Path = "TestConfigI8.Int8.Number", Value = 127 }, + { File = "config.toml", Path = "TestConfigI8.Int8.Number", Value = 128 }, + { File = "config.toml", Path = "TestConfigI8.Int8.Number", Value = -128 }, + { File = "config.toml", Path = "TestConfigI8.Int8.Number", Value = -129 }, + { File = "config.toml", Path = "TestConfigI16.Int16.Number", Value = 32767 }, + { File = "config.toml", Path = "TestConfigI16.Int16.Number", Value = 32768 }, + { File = "config.toml", Path = "TestConfigI16.Int16.Number", Value = -32768 }, + { File = "config.toml", Path = "TestConfigI16.Int16.Number", Value = -32769 }, + { File = "config.toml", Path = "TestConfigI32.Int32.Number", Value = 2147483647 }, + { File = "config.toml", Path = "TestConfigI32.Int32.Number", Value = 2147483648 }, + { File = "config.toml", Path = "TestConfigI32.Int32.Number", Value = -2147483648 }, + { File = "config.toml", Path = "TestConfigI32.Int32.Number", Value = -2147483649 }, + { File = "config.toml", Path = "TestConfigI64.Int64.Number", Value = 9223372036854775807 }, + { File = "config.toml", Path = "TestConfigI64.Int64.Number", Value = -9223372036854775808 }, + { File = "config.toml", Path = "TestConfigU8.Uint8.Number", Value = 255 }, + { File = "config.toml", Path = "TestConfigU8.Uint8.Number", Value = 256 }, + { File = "config.toml", Path = "TestConfigU8.Uint8.Number", Value = -256 }, + { File = "config.toml", Path = "TestConfigU16.Uint16.Number", Value = 65535 }, + { File = "config.toml", Path = "TestConfigU16.Uint16.Number", Value = 65536 }, + { File = "config.toml", Path = "TestConfigU16.Uint16.Number", Value = -65536 }, + { File = "config.toml", Path = "TestConfigU32.Uint32.Number", 
Value = 4294967295 }, + { File = "config.toml", Path = "TestConfigU32.Uint32.Number", Value = 4294967296 }, + { File = "config.toml", Path = "TestConfigU32.Uint32.Number", Value = -4294967296 }, + { File = "config.toml", Path = "TestConfigU64.Uint64.Number", Value = 9223372036854775807 }, + { File = "config.toml", Path = "TestConfigU64.Uint64.Number", Value = -9223372036854775808 }, + { File = "config.toml", Path = "TestConfigF32.Float32.Number", Value = 3.4 }, + { File = "config.toml", Path = "TestConfigF32.Float32.Number", Value = 3.4e+39 }, + { File = "config.toml", Path = "TestConfigF32.Float32.Number", Value = -3.4 }, + { File = "config.toml", Path = "TestConfigF32.Float32.Number", Value = -3.4e+40 }, + { File = "config.toml", Path = "TestConfigF64.Float64.Number", Value = 1.7e+308 }, + { File = "config.toml", Path = "TestConfigF64.Float64.Number", Value = -1.7e+308 }, + { File = "config.toml", Path = "TestConfigStruct.ConfigStruct.Description", Value = { Number = 11 } }, + { File = "config.toml", Path = "TestConfigStruct.ConfigStruct.Description", Value = { Nr = 222 } }, + { File = "config.toml", Path = "TestConfigStruct.ConfigStruct.Description", Value = { Number = "11" } }, + { File = "config.toml", Path = "TestConfigNestedStruct.ConfigNestedStruct", Value = { Text = "Overwritten text", Message = { Public = false, MessageDescription = [{ Text = "Overwritten Text1" }] } } }, + { File = "config.toml", Path = "TestConfigNestedStruct.ConfigNestedStruct.Message.MessageDescription", Value = [{ Text = "Overwritten Text1" }, { Text = "Overwritten Text2" }] }, + { File = "config.toml", Path = "TestMap.Map", Value = { "Key1" = { Number = 10 }, "Key2" = { Number = 11 } } }, + { File = "config.toml", Path = "TestMap.Map", Value = { "Key2" = { Number = 2 }, "Key3" = { Number = 3 } } }, +] diff --git a/testscommon/toml/overwriteConfig.go b/testscommon/toml/overwriteConfig.go new file mode 100644 index 00000000000..68deb6f9dd5 --- /dev/null +++ b/testscommon/toml/overwriteConfig.go @@ -0,0 +1,8 @@ +package toml + +import "github.com/multiversx/mx-chain-go/config" + +// OverrideConfig holds an array of configs to be overridden +type OverrideConfig struct { + OverridableConfigTomlValues []config.OverridableConfig +} diff --git a/testscommon/transactionCoordinatorMock.go b/testscommon/transactionCoordinatorMock.go index e5a52257c67..a1889b0b753 100644 --- a/testscommon/transactionCoordinatorMock.go +++ b/testscommon/transactionCoordinatorMock.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/processedMb" ) @@ -29,10 +30,12 @@ type TransactionCoordinatorMock struct { VerifyCreatedBlockTransactionsCalled func(hdr data.HeaderHandler, body *block.Body) error CreatePostProcessMiniBlocksCalled func() block.MiniBlockSlice VerifyCreatedMiniBlocksCalled func(hdr data.HeaderHandler, body *block.Body) error - AddIntermediateTransactionsCalled func(mapSCRs map[block.Type][]data.TransactionHandler) error + AddIntermediateTransactionsCalled func(mapSCRs map[block.Type][]data.TransactionHandler, key []byte) error GetAllIntermediateTxsCalled func() map[block.Type]map[string]data.TransactionHandler AddTxsFromMiniBlocksCalled func(miniBlocks block.MiniBlockSlice) AddTransactionsCalled func(txHandlers []data.TransactionHandler, blockType block.Type) + + miniBlocks 
[]*block.MiniBlock } // GetAllCurrentLogs - @@ -45,7 +48,7 @@ func (tcm *TransactionCoordinatorMock) CreatePostProcessMiniBlocks() block.MiniB if tcm.CreatePostProcessMiniBlocksCalled != nil { return tcm.CreatePostProcessMiniBlocksCalled() } - return nil + return tcm.miniBlocks } // CreateReceiptsHash - @@ -213,12 +216,12 @@ func (tcm *TransactionCoordinatorMock) VerifyCreatedMiniBlocks(hdr data.HeaderHa } // AddIntermediateTransactions - -func (tcm *TransactionCoordinatorMock) AddIntermediateTransactions(mapSCRs map[block.Type][]data.TransactionHandler) error { +func (tcm *TransactionCoordinatorMock) AddIntermediateTransactions(mapSCRs map[block.Type][]data.TransactionHandler, key []byte) error { if tcm.AddIntermediateTransactionsCalled == nil { return nil } - return tcm.AddIntermediateTransactionsCalled(mapSCRs) + return tcm.AddIntermediateTransactionsCalled(mapSCRs, key) } // GetAllIntermediateTxs - @@ -233,6 +236,7 @@ func (tcm *TransactionCoordinatorMock) GetAllIntermediateTxs() map[block.Type]ma // AddTxsFromMiniBlocks - func (tcm *TransactionCoordinatorMock) AddTxsFromMiniBlocks(miniBlocks block.MiniBlockSlice) { if tcm.AddTxsFromMiniBlocksCalled == nil { + tcm.miniBlocks = append(tcm.miniBlocks, miniBlocks...) return } @@ -248,6 +252,10 @@ func (tcm *TransactionCoordinatorMock) AddTransactions(txHandlers []data.Transac tcm.AddTransactionsCalled(txHandlers, blockType) } +func (tcm *TransactionCoordinatorMock) ClearStoredMbs() { + tcm.miniBlocks = make([]*block.MiniBlock, 0) +} + // IsInterfaceNil returns true if there is no value under the interface func (tcm *TransactionCoordinatorMock) IsInterfaceNil() bool { return tcm == nil diff --git a/testscommon/trie/trieStub.go b/testscommon/trie/trieStub.go index 81c90867e92..30e0ba6066e 100644 --- a/testscommon/trie/trieStub.go +++ b/testscommon/trie/trieStub.go @@ -19,8 +19,7 @@ type TrieStub struct { DeleteCalled func(key []byte) error RootCalled func() ([]byte, error) CommitCalled func() error - RecreateCalled func(root []byte) (common.Trie, error) - RecreateFromEpochCalled func(options common.RootHashHolder) (common.Trie, error) + RecreateCalled func(options common.RootHashHolder) (common.Trie, error) GetObsoleteHashesCalled func() [][]byte AppendToOldHashesCalled func([][]byte) GetSerializedNodesCalled func([]byte, uint64) ([][]byte, uint64, error) @@ -136,18 +135,9 @@ func (ts *TrieStub) Commit() error { } // Recreate - -func (ts *TrieStub) Recreate(root []byte) (common.Trie, error) { +func (ts *TrieStub) Recreate(options common.RootHashHolder) (common.Trie, error) { if ts.RecreateCalled != nil { - return ts.RecreateCalled(root) - } - - return nil, errNotImplemented -} - -// RecreateFromEpoch - -func (ts *TrieStub) RecreateFromEpoch(options common.RootHashHolder) (common.Trie, error) { - if ts.RecreateFromEpochCalled != nil { - return ts.RecreateFromEpochCalled(options) + return ts.RecreateCalled(options) } return nil, errNotImplemented diff --git a/testscommon/txDataBuilder/builder.go b/testscommon/txDataBuilder/builder.go index 49916cd5a1c..3198792ac57 100644 --- a/testscommon/txDataBuilder/builder.go +++ b/testscommon/txDataBuilder/builder.go @@ -5,6 +5,7 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) // TxDataBuilder constructs a string to be used for transaction arguments @@ -176,11 +177,20 @@ func (builder *TxDataBuilder) TransferESDT(token string, value int64) *TxDataBui return 
builder.Func(core.BuiltInFunctionESDTTransfer).Str(token).Int64(value) } -//TransferESDTNFT appends to the data string all the elements required to request an ESDT NFT transfer. +// TransferESDTNFT appends to the data string all the elements required to request an ESDT NFT transfer. func (builder *TxDataBuilder) TransferESDTNFT(token string, nonce int, value int64) *TxDataBuilder { return builder.Func(core.BuiltInFunctionESDTNFTTransfer).Str(token).Int(nonce).Int64(value) } +// MultiTransferESDTNFT appends to the data string all the elements required to request an Multi ESDT NFT transfer. +func (builder *TxDataBuilder) MultiTransferESDTNFT(destinationAddress []byte, transfers []*vmcommon.ESDTTransfer) *TxDataBuilder { + txBuilder := builder.Func(core.BuiltInFunctionMultiESDTNFTTransfer).Bytes(destinationAddress).Int(len(transfers)) + for _, transfer := range transfers { + txBuilder.Bytes(transfer.ESDTTokenName).Int(int(transfer.ESDTTokenNonce)).BigInt(transfer.ESDTValue) + } + return txBuilder +} + // BurnESDT appends to the data string all the elements required to burn ESDT tokens. func (builder *TxDataBuilder) BurnESDT(token string, value int64) *TxDataBuilder { return builder.Func(core.BuiltInFunctionESDTBurn).Str(token).Int64(value) diff --git a/process/mock/validatorStatisticsProcessorStub.go b/testscommon/validatorStatisticsProcessorStub.go similarity index 84% rename from process/mock/validatorStatisticsProcessorStub.go rename to testscommon/validatorStatisticsProcessorStub.go index b3e4f947da0..4d588610d31 100644 --- a/process/mock/validatorStatisticsProcessorStub.go +++ b/testscommon/validatorStatisticsProcessorStub.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( "github.com/multiversx/mx-chain-core-go/data" @@ -12,23 +12,15 @@ type ValidatorStatisticsProcessorStub struct { GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) RootHashCalled func() ([]byte, error) LastFinalizedRootHashCalled func() []byte - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error + ResetValidatorStatisticsAtNewEpochCalled func(vInfos state.ShardValidatorsInfoMapHandler) error + GetValidatorInfoForRootHashCalled func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) + ProcessRatingsEndOfEpochCalled func(validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32) error ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error CommitCalled func() ([]byte, error) PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) } -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - // PeerAccountToValidatorInfo - func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { if vsp.PeerAccountToValidatorInfoCalled != nil { @@ -56,7 +48,7 @@ func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { } // ResetValidatorStatisticsAtNewEpoch - -func (vsp 
*ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { +func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos state.ShardValidatorsInfoMapHandler) error { if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) } @@ -64,19 +56,11 @@ func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch( } // GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { +func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { if vsp.GetValidatorInfoForRootHashCalled != nil { return vsp.GetValidatorInfoForRootHashCalled(rootHash) } - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil + return state.NewShardValidatorsInfoMap(), nil } // UpdatePeerState - @@ -87,6 +71,14 @@ func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHea return nil, nil } +// ProcessRatingsEndOfEpoch - +func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32) error { + if vsp.ProcessRatingsEndOfEpochCalled != nil { + return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) + } + return nil +} + // RevertPeerState - func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { if vsp.RevertPeerStateCalled != nil { @@ -103,8 +95,20 @@ func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { return nil, nil } -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { +// SetLastFinalizedRootHash - +func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { +} + +// LastFinalizedRootHash - +func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { + if vsp.LastFinalizedRootHashCalled != nil { + return vsp.LastFinalizedRootHashCalled() + } + return nil +} + +// GetPeerAccount - +func (vsp *ValidatorStatisticsProcessorStub) GetPeerAccount(address []byte) (state.PeerAccountHandler, error) { if vsp.GetPeerAccountCalled != nil { return vsp.GetPeerAccountCalled(address) } @@ -116,19 +120,15 @@ func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []by func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { } -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - if vsp.LastFinalizedRootHashCalled != nil { - return vsp.LastFinalizedRootHashCalled() +// SaveNodesCoordinatorUpdates - +func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { + if vsp.SaveNodesCoordinatorUpdatesCalled != nil { + return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) } - return nil + return false, nil } // IsInterfaceNil - func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool 
{ - return false + return vsp == nil } diff --git a/testscommon/vmcommonMocks/userAccountStub.go b/testscommon/vmcommonMocks/userAccountStub.go index 6fb0b1f4d85..5e2357c491b 100644 --- a/testscommon/vmcommonMocks/userAccountStub.go +++ b/testscommon/vmcommonMocks/userAccountStub.go @@ -14,6 +14,7 @@ type UserAccountStub struct { GetRootHashCalled func() []byte AccountDataHandlerCalled func() vmcommon.AccountDataHandler AddToBalanceCalled func(value *big.Int) error + SubFromBalanceCalled func(value *big.Int) error GetBalanceCalled func() *big.Int ClaimDeveloperRewardsCalled func([]byte) (*big.Int, error) GetDeveloperRewardCalled func() *big.Int @@ -74,6 +75,14 @@ func (uas *UserAccountStub) AddToBalance(value *big.Int) error { return nil } +// SubFromBalance - +func (uas *UserAccountStub) SubFromBalance(value *big.Int) error { + if uas.SubFromBalanceCalled != nil { + return uas.SubFromBalanceCalled(value) + } + return nil +} + // GetBalance - func (uas *UserAccountStub) GetBalance() *big.Int { if uas.GetBalanceCalled != nil { @@ -159,7 +168,7 @@ func (uas *UserAccountStub) GetNonce() uint64 { return 0 } -//IsInterfaceNil - +// IsInterfaceNil - func (uas *UserAccountStub) IsInterfaceNil() bool { return uas == nil } diff --git a/trie/depthFirstSync_test.go b/trie/depthFirstSync_test.go index 456c1b1f3e8..409c618a2c6 100644 --- a/trie/depthFirstSync_test.go +++ b/trie/depthFirstSync_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -121,7 +122,7 @@ func TestDepthFirstTrieSyncer_StartSyncingNewTrieShouldWork(t *testing.T) { tsm, _ := arg.DB.(*trieStorageManager) db, _ := tsm.mainStorer.(storage.Persister) trie, _ := createInMemoryTrieFromDB(db) - trie, _ = trie.Recreate(roothash) + trie, _ = trie.Recreate(holders.NewDefaultRootHashesHolder(roothash)) require.False(t, check.IfNil(trie)) var val []byte @@ -198,7 +199,7 @@ func TestDepthFirstTrieSyncer_StartSyncingPartiallyFilledTrieShouldWork(t *testi tsm, _ := arg.DB.(*trieStorageManager) db, _ := tsm.mainStorer.(storage.Persister) trie, _ := createInMemoryTrieFromDB(db) - trie, _ = trie.Recreate(roothash) + trie, _ = trie.Recreate(holders.NewDefaultRootHashesHolder(roothash)) require.False(t, check.IfNil(trie)) var val []byte diff --git a/trie/doubleListSync_test.go b/trie/doubleListSync_test.go index e4d737cf8f0..8e631237cc6 100644 --- a/trie/doubleListSync_test.go +++ b/trie/doubleListSync_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" @@ -201,7 +202,7 @@ func TestDoubleListTrieSyncer_StartSyncingNewTrieShouldWork(t *testing.T) { tsm, _ := arg.DB.(*trieStorageManager) db, _ := tsm.mainStorer.(storage.Persister) trie, _ := createInMemoryTrieFromDB(db) - trie, _ = trie.Recreate(roothash) + 
trie, _ = trie.Recreate(holders.NewDefaultRootHashesHolder(roothash)) require.False(t, check.IfNil(trie)) var val []byte @@ -278,7 +279,7 @@ func TestDoubleListTrieSyncer_StartSyncingPartiallyFilledTrieShouldWork(t *testi tsm, _ := arg.DB.(*trieStorageManager) db, _ := tsm.mainStorer.(storage.Persister) trie, _ := createInMemoryTrieFromDB(db) - trie, _ = trie.Recreate(roothash) + trie, _ = trie.Recreate(holders.NewDefaultRootHashesHolder(roothash)) require.False(t, check.IfNil(trie)) var val []byte diff --git a/trie/extensionNode_test.go b/trie/extensionNode_test.go index ffc46b7d6b0..5ae78db766a 100644 --- a/trie/extensionNode_test.go +++ b/trie/extensionNode_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/storage/cache" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -732,7 +733,7 @@ func TestExtensionNode_reduceNodeCollapsedNode(t *testing.T) { tr := initTrie() _ = tr.Commit() rootHash, _ := tr.RootHash() - collapsedTrie, _ := tr.Recreate(rootHash) + collapsedTrie, _ := tr.Recreate(holders.NewDefaultRootHashesHolder(rootHash)) err := collapsedTrie.Delete([]byte("doe")) assert.Nil(t, err) diff --git a/trie/node.go b/trie/node.go index 6d82a238e95..754b3b3548d 100644 --- a/trie/node.go +++ b/trie/node.go @@ -152,7 +152,7 @@ func resolveIfCollapsed(n node, pos byte, db common.TrieStorageInteractor) error func handleStorageInteractorStats(db common.TrieStorageInteractor) { if db != nil { - db.GetStateStatsHandler().IncrTrie() + db.GetStateStatsHandler().IncrementTrie() } } diff --git a/trie/node_extension.go b/trie/node_extension.go index 4e7b38a6a7d..ffbdab699ad 100644 --- a/trie/node_extension.go +++ b/trie/node_extension.go @@ -26,8 +26,8 @@ func shouldTestNode(n node, key []byte) bool { } func snapshotGetTestPoint(key []byte, faultyChance int) error { - rand.Seed(time.Now().UnixNano()) - checkVal := rand.Intn(math.MaxInt) + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + checkVal := rnd.Intn(math.MaxInt) if checkVal%faultyChance == 0 { log.Debug("deliberately not returning hash", "hash", key) return fmt.Errorf("snapshot get error") diff --git a/trie/patriciaMerkleTrie.go b/trie/patriciaMerkleTrie.go index 485b01bf199..70df549011f 100644 --- a/trie/patriciaMerkleTrie.go +++ b/trie/patriciaMerkleTrie.go @@ -269,13 +269,8 @@ func (tr *patriciaMerkleTrie) Commit() error { return nil } -// Recreate returns a new trie that has the given root hash and database -func (tr *patriciaMerkleTrie) Recreate(root []byte) (common.Trie, error) { - return tr.recreate(root, tr.trieStorage) -} - -// RecreateFromEpoch returns a new trie, given the options -func (tr *patriciaMerkleTrie) RecreateFromEpoch(options common.RootHashHolder) (common.Trie, error) { +// Recreate returns a new trie, given the options +func (tr *patriciaMerkleTrie) Recreate(options common.RootHashHolder) (common.Trie, error) { if check.IfNil(options) { return nil, ErrNilRootHashHolder } @@ -399,6 +394,12 @@ func (tr *patriciaMerkleTrie) recreateFromDb(rootHash []byte, tsm common.Storage // GetSerializedNode returns the serialized node (if existing) provided the node's hash func (tr *patriciaMerkleTrie) GetSerializedNode(hash []byte) ([]byte, error) { + // TODO: investigate if we can move the critical 
section behavior in the trie node resolver as this call will compete with a normal trie.Get operation + // which might occur during processing. + // warning: A critical section here or on the trie node resolver must be kept as to not overwhelm the node with requests that affects the block processing flow + tr.mutOperation.Lock() + defer tr.mutOperation.Unlock() + log.Trace("GetSerializedNode", "hash", hash) return tr.trieStorage.Get(hash) @@ -406,6 +407,12 @@ func (tr *patriciaMerkleTrie) GetSerializedNode(hash []byte) ([]byte, error) { // GetSerializedNodes returns a batch of serialized nodes from the trie, starting from the given hash func (tr *patriciaMerkleTrie) GetSerializedNodes(rootHash []byte, maxBuffToSend uint64) ([][]byte, uint64, error) { + // TODO: investigate if we can move the critical section behavior in the trie node resolver as this call will compete with a normal trie.Get operation + // which might occur during processing. + // warning: A critical section here or on the trie node resolver must be kept as to not overwhelm the node with requests that affects the block processing flow + tr.mutOperation.Lock() + defer tr.mutOperation.Unlock() + log.Trace("GetSerializedNodes", "rootHash", rootHash) size := uint64(0) diff --git a/trie/patriciaMerkleTrie_test.go b/trie/patriciaMerkleTrie_test.go index 3443858e7e7..4612b8ee331 100644 --- a/trie/patriciaMerkleTrie_test.go +++ b/trie/patriciaMerkleTrie_test.go @@ -9,6 +9,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "testing" "time" @@ -22,7 +23,7 @@ import ( errorsCommon "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/state/parsers" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" - "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/keyBuilder" @@ -388,27 +389,12 @@ func TestPatriciaMerkleTree_DeleteNotPresent(t *testing.T) { func TestPatriciaMerkleTrie_Recreate(t *testing.T) { t.Parallel() - tr := initTrie() - rootHash, _ := tr.RootHash() - _ = tr.Commit() - - newTr, err := tr.Recreate(rootHash) - assert.Nil(t, err) - assert.NotNil(t, newTr) - - root, _ := newTr.RootHash() - assert.Equal(t, rootHash, root) -} - -func TestPatriciaMerkleTrie_RecreateFromEpoch(t *testing.T) { - t.Parallel() - t.Run("nil options", func(t *testing.T) { t.Parallel() tr := initTrie() - newTr, err := tr.RecreateFromEpoch(nil) + newTr, err := tr.Recreate(nil) assert.Nil(t, newTr) assert.Equal(t, trie.ErrNilRootHashHolder, err) }) @@ -420,8 +406,8 @@ func TestPatriciaMerkleTrie_RecreateFromEpoch(t *testing.T) { rootHash, _ := tr.RootHash() _ = tr.Commit() - rootHashHolder := holders.NewRootHashHolder(rootHash, core.OptionalUint32{}) - newTr, err := tr.RecreateFromEpoch(rootHashHolder) + rootHashHolder := holders.NewDefaultRootHashesHolder(rootHash) + newTr, err := tr.Recreate(rootHashHolder) assert.Nil(t, err) assert.True(t, trie.IsBaseTrieStorageManager(newTr.GetStorageManager())) @@ -439,7 +425,7 @@ func TestPatriciaMerkleTrie_RecreateFromEpoch(t *testing.T) { HasValue: true, } rootHashHolder := holders.NewRootHashHolder(rootHash, optionalUint32) - newTr, err := tr.RecreateFromEpoch(rootHashHolder) + newTr, err := tr.Recreate(rootHashHolder) 
assert.Nil(t, err) assert.True(t, trie.IsTrieStorageManagerInEpoch(newTr.GetStorageManager())) @@ -451,7 +437,7 @@ func TestPatriciaMerkleTrie_RecreateWithInvalidRootHash(t *testing.T) { tr := initTrie() - newTr, err := tr.Recreate(nil) + newTr, err := tr.Recreate(holders.NewDefaultRootHashesHolder([]byte{})) assert.Nil(t, err) root, _ := newTr.RootHash() assert.Equal(t, emptyTrieHash, root) @@ -966,7 +952,7 @@ func TestPatriciaMerkleTrie_ConcurrentOperations(t *testing.T) { numOperations := 1000 wg := sync.WaitGroup{} wg.Add(numOperations) - numFunctions := 19 + numFunctions := 18 initialRootHash, _ := tr.RootHash() @@ -992,31 +978,28 @@ func TestPatriciaMerkleTrie_ConcurrentOperations(t *testing.T) { err := tr.Commit() assert.Nil(t, err) case 5: - _, err := tr.Recreate(initialRootHash) - assert.Nil(t, err) - case 6: epoch := core.OptionalUint32{ Value: 3, HasValue: true, } rootHashHolder := holders.NewRootHashHolder(initialRootHash, epoch) - _, err := tr.RecreateFromEpoch(rootHashHolder) + _, err := tr.Recreate(rootHashHolder) assert.Nil(t, err) - case 7: + case 6: _ = tr.String() - case 8: + case 7: _ = tr.GetObsoleteHashes() - case 9: + case 8: _, err := tr.GetDirtyHashes() assert.Nil(t, err) - case 10: + case 9: _, err := tr.GetSerializedNode(initialRootHash) assert.Nil(t, err) - case 11: + case 10: size1KB := uint64(1024 * 1024) _, _, err := tr.GetSerializedNodes(initialRootHash, size1KB) assert.Nil(t, err) - case 12: + case 11: trieIteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, 1000), ErrChan: errChan.NewErrChanWrapper(), @@ -1030,20 +1013,20 @@ func TestPatriciaMerkleTrie_ConcurrentOperations(t *testing.T) { parsers.NewMainTrieLeafParser(), ) assert.Nil(t, err) - case 13: + case 12: _, err := tr.GetAllHashes() assert.Nil(t, err) - case 14: + case 13: _, _, _ = tr.GetProof(initialRootHash) // this might error due to concurrent operations that change the roothash - case 15: + case 14: // extremely hard to compute an existing hash due to concurrent changes. 
_, _ = tr.VerifyProof([]byte("dog"), []byte("puppy"), [][]byte{[]byte("proof1")}) // this might error due to concurrent operations that change the roothash - case 16: + case 15: sm := tr.GetStorageManager() assert.NotNil(t, sm) - case 17: + case 16: _ = tr.GetOldRoot() - case 18: + case 17: trieStatsHandler := tr.(common.TrieStats) _, err := trieStatsHandler.GetTrieStats("address", initialRootHash) assert.Nil(t, err) @@ -1058,64 +1041,56 @@ func TestPatriciaMerkleTrie_ConcurrentOperations(t *testing.T) { wg.Wait() } -func TestPatriciaMerkleTrie_GetSerializedNodesClose(t *testing.T) { +func TestPatriciaMerkleTrie_GetSerializedNodesShouldSerializeTheCalls(t *testing.T) { t.Parallel() args := trie.GetDefaultTrieStorageManagerParameters() - args.MainStorer = &storage.StorerStub{ - GetCalled: func(key []byte) ([]byte, error) { - // gets take a long time + numConcurrentCalls := int32(0) + testTrieStorageManager := &storageManager.StorageManagerStub{ + GetCalled: func(bytes []byte) ([]byte, error) { + newValue := atomic.AddInt32(&numConcurrentCalls, 1) + defer atomic.AddInt32(&numConcurrentCalls, -1) + + assert.Equal(t, int32(1), newValue) + + // get takes a long time time.Sleep(time.Millisecond * 10) - return key, nil + + return bytes, nil }, } - trieStorageManager, _ := trie.NewTrieStorageManager(args) - tr, _ := trie.NewTrie(trieStorageManager, args.Marshalizer, args.Hasher, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 5) - numGoRoutines := 1000 - wgStart := sync.WaitGroup{} - wgStart.Add(numGoRoutines) - wgEnd := sync.WaitGroup{} - wgEnd.Add(numGoRoutines) + tr, _ := trie.NewTrie(testTrieStorageManager, args.Marshalizer, args.Hasher, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 5) + numGoRoutines := 100 + wg := sync.WaitGroup{} + wg.Add(numGoRoutines) for i := 0; i < numGoRoutines; i++ { if i%2 == 0 { go func() { time.Sleep(time.Millisecond * 100) - wgStart.Done() - _, _, _ = tr.GetSerializedNodes([]byte("dog"), 1024) - wgEnd.Done() + wg.Done() }() } else { go func() { time.Sleep(time.Millisecond * 100) - wgStart.Done() - _, _ = tr.GetSerializedNode([]byte("dog")) - wgEnd.Done() + wg.Done() }() } } - wgStart.Wait() + wg.Wait() chanClosed := make(chan struct{}) go func() { _ = tr.Close() close(chanClosed) }() - chanGetsEnded := make(chan struct{}) - go func() { - wgEnd.Wait() - close(chanGetsEnded) - }() - timeout := time.Second * 10 select { case <-chanClosed: // ok - case <-chanGetsEnded: - assert.Fail(t, "trie should have been closed before all gets ended") case <-time.After(timeout): assert.Fail(t, "timeout waiting for trie to be closed") } @@ -1391,7 +1366,7 @@ func TestPatriciaMerkleTrie_CollectLeavesForMigration(t *testing.T) { addDefaultDataToTrie(tr) _ = tr.Commit() rootHash, _ := tr.RootHash() - collapsedTrie, _ := tr.Recreate(rootHash) + collapsedTrie, _ := tr.Recreate(holders.NewDefaultRootHashesHolder(rootHash)) dtr := collapsedTrie.(dataTrie) dtm := &trieMock.DataTrieMigratorStub{ ConsumeStorageLoadGasCalled: func() bool { diff --git a/update/factory/dataTrieFactory.go b/update/factory/dataTrieFactory.go index f9491350693..dcd83da1bd7 100644 --- a/update/factory/dataTrieFactory.go +++ b/update/factory/dataTrieFactory.go @@ -67,8 +67,7 @@ func NewDataTrieFactory(args ArgsNewDataTrieFactory) (*dataTrieFactory, error) { dbConfig := storageFactory.GetDBFromConfig(args.StorageConfig.DB) dbConfig.FilePath = path.Join(args.SyncFolder, args.StorageConfig.DB.FilePath) - dbConfigHandler := factory.NewDBConfigHandler(args.StorageConfig.DB) - persisterFactory, err := 
factory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := factory.NewPersisterFactory(args.StorageConfig.DB) if err != nil { return nil, err } diff --git a/update/factory/exportHandlerFactory.go b/update/factory/exportHandlerFactory.go index 8dd429345bb..a8ed95f4ceb 100644 --- a/update/factory/exportHandlerFactory.go +++ b/update/factory/exportHandlerFactory.go @@ -18,6 +18,7 @@ import ( mxFactory "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/genesis/process/disabled" "github.com/multiversx/mx-chain-go/process" + processDisabled "github.com/multiversx/mx-chain-go/process/disabled" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" @@ -588,6 +589,7 @@ func (e *exportHandlerFactory) createInterceptors() error { FullArchiveInterceptorsContainer: e.fullArchiveInterceptorsContainer, AntifloodHandler: e.networkComponents.InputAntiFloodHandler(), NodeOperationMode: e.nodeOperationMode, + RelayedTxV3Processor: processDisabled.NewRelayedTxV3Processor(), } fullSyncInterceptors, err := NewFullSyncInterceptorsContainerFactory(argsInterceptors) if err != nil { @@ -608,8 +610,7 @@ func createStorer(storageConfig config.StorageConfig, folder string) (storage.St dbConfig := storageFactory.GetDBFromConfig(storageConfig.DB) dbConfig.FilePath = path.Join(folder, storageConfig.DB.FilePath) - dbConfigHandler := storageFactory.NewDBConfigHandler(storageConfig.DB) - persisterFactory, err := storageFactory.NewPersisterFactory(dbConfigHandler) + persisterFactory, err := storageFactory.NewPersisterFactory(storageConfig.DB) if err != nil { return nil, err } diff --git a/update/factory/fullSyncInterceptors.go b/update/factory/fullSyncInterceptors.go index 0fe0298c4d6..67d5a86a503 100644 --- a/update/factory/fullSyncInterceptors.go +++ b/update/factory/fullSyncInterceptors.go @@ -75,6 +75,7 @@ type ArgsNewFullSyncInterceptorsContainerFactory struct { FullArchiveInterceptorsContainer process.InterceptorsContainer AntifloodHandler process.P2PAntifloodHandler NodeOperationMode common.NodeOperation + RelayedTxV3Processor process.RelayedTxV3Processor } // NewFullSyncInterceptorsContainerFactory is responsible for creating a new interceptors factory object @@ -145,6 +146,7 @@ func NewFullSyncInterceptorsContainerFactory( EpochStartTrigger: args.EpochStartTrigger, WhiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, ArgsParser: smartContract.NewArgumentParser(), + RelayedTxV3Processor: args.RelayedTxV3Processor, } icf := &fullSyncInterceptorsContainerFactory{ diff --git a/update/genesis/common.go b/update/genesis/common.go index 2ce58de50af..d8d3b11ca0e 100644 --- a/update/genesis/common.go +++ b/update/genesis/common.go @@ -6,7 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" ) @@ -14,25 +14,20 @@ import ( // TODO: create a structure or use this function also in process/peer/process.go func getValidatorDataFromLeaves( leavesChannels *common.TrieIteratorChannels, - shardCoordinator sharding.Coordinator, marshalizer 
marshal.Marshalizer, -) (map[uint32][]*state.ValidatorInfo, error) { - - validators := make(map[uint32][]*state.ValidatorInfo, shardCoordinator.NumberOfShards()+1) - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - validators[i] = make([]*state.ValidatorInfo, 0) - } - validators[core.MetachainShardId] = make([]*state.ValidatorInfo, 0) - +) (state.ShardValidatorsInfoMapHandler, error) { + validators := state.NewShardValidatorsInfoMap() for pa := range leavesChannels.LeavesChan { peerAccount, err := unmarshalPeer(pa, marshalizer) if err != nil { return nil, err } - currentShardId := peerAccount.GetShardId() validatorInfoData := peerAccountToValidatorInfo(peerAccount) - validators[currentShardId] = append(validators[currentShardId], validatorInfoData) + err = validators.Add(validatorInfoData) + if err != nil { + return nil, err + } } err := leavesChannels.ErrChan.ReadFromChanNonBlocking() @@ -60,7 +55,9 @@ func peerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.Val PublicKey: peerAccount.AddressBytes(), ShardId: peerAccount.GetShardId(), List: getActualList(peerAccount), + PreviousList: peerAccount.GetPreviousList(), Index: peerAccount.GetIndexInList(), + PreviousIndex: peerAccount.GetPreviousIndexInList(), TempRating: peerAccount.GetTempRating(), Rating: peerAccount.GetRating(), RewardAddress: peerAccount.GetRewardAddress(), @@ -92,7 +89,7 @@ func getActualList(peerAccount state.PeerAccountHandler) string { return string(common.LeavingList) } -func shouldExportValidator(validator *state.ValidatorInfo, allowedLists []common.PeerType) bool { +func shouldExportValidator(validator state.ValidatorInfoHandler, allowedLists []common.PeerType) bool { validatorList := validator.GetList() for _, list := range allowedLists { diff --git a/update/genesis/export.go b/update/genesis/export.go index 0f5c469afc9..ba4e678a0f8 100644 --- a/update/genesis/export.go +++ b/update/genesis/export.go @@ -311,8 +311,7 @@ func (se *stateExport) exportTrie(key string, trie common.Trie) error { } if accType == ValidatorAccount { - var validatorData map[uint32][]*state.ValidatorInfo - validatorData, err = getValidatorDataFromLeaves(leavesChannels, se.shardCoordinator, se.marshalizer) + validatorData, err := getValidatorDataFromLeaves(leavesChannels, se.marshalizer) if err != nil { return err } @@ -443,30 +442,28 @@ func (se *stateExport) exportValidatorInfo(key string, validatorInfo *state.Shar return nil } -func (se *stateExport) exportNodesSetupJson(validators map[uint32][]*state.ValidatorInfo) error { +func (se *stateExport) exportNodesSetupJson(validators state.ShardValidatorsInfoMapHandler) error { acceptedListsForExport := []common.PeerType{common.EligibleList, common.WaitingList, common.JailedList} initialNodes := make([]*sharding.InitialNode, 0) - for _, validatorsInShard := range validators { - for _, validator := range validatorsInShard { - if shouldExportValidator(validator, acceptedListsForExport) { - - pubKey, err := se.validatorPubKeyConverter.Encode(validator.GetPublicKey()) - if err != nil { - return nil - } - - rewardAddress, err := se.addressPubKeyConverter.Encode(validator.GetRewardAddress()) - if err != nil { - return nil - } - - initialNodes = append(initialNodes, &sharding.InitialNode{ - PubKey: pubKey, - Address: rewardAddress, - InitialRating: validator.GetRating(), - }) + for _, validator := range validators.GetAllValidatorsInfo() { + if shouldExportValidator(validator, acceptedListsForExport) { + + pubKey, err := 
se.validatorPubKeyConverter.Encode(validator.GetPublicKey()) + if err != nil { + return nil + } + + rewardAddress, err := se.addressPubKeyConverter.Encode(validator.GetRewardAddress()) + if err != nil { + return nil } + + initialNodes = append(initialNodes, &sharding.InitialNode{ + PubKey: pubKey, + Address: rewardAddress, + InitialRating: validator.GetRating(), + }) } } diff --git a/update/genesis/export_test.go b/update/genesis/export_test.go index f1fca206504..bad77b07959 100644 --- a/update/genesis/export_test.go +++ b/update/genesis/export_test.go @@ -389,16 +389,17 @@ func TestStateExport_ExportNodesSetupJsonShouldExportKeysInAlphabeticalOrder(t * require.False(t, check.IfNil(stateExporter)) - vals := make(map[uint32][]*state.ValidatorInfo) - val50 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("aaa"), List: string(common.EligibleList)} - val51 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("bbb"), List: string(common.EligibleList)} - val10 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("ccc"), List: string(common.EligibleList)} - val11 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("ddd"), List: string(common.EligibleList)} - val00 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("aaaaaa"), List: string(common.EligibleList)} - val01 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("bbbbbb"), List: string(common.EligibleList)} - vals[1] = []*state.ValidatorInfo{val50, val51} - vals[0] = []*state.ValidatorInfo{val00, val01} - vals[2] = []*state.ValidatorInfo{val10, val11} + vals := state.NewShardValidatorsInfoMap() + val50 := &state.ValidatorInfo{ShardId: 0, PublicKey: []byte("aaa"), List: string(common.EligibleList)} + val51 := &state.ValidatorInfo{ShardId: 0, PublicKey: []byte("bbb"), List: string(common.EligibleList)} + val10 := &state.ValidatorInfo{ShardId: 1, PublicKey: []byte("ccc"), List: string(common.EligibleList)} + val11 := &state.ValidatorInfo{ShardId: 1, PublicKey: []byte("ddd"), List: string(common.EligibleList)} + val00 := &state.ValidatorInfo{ShardId: 2, PublicKey: []byte("aaaaaa"), List: string(common.EligibleList)} + val01 := &state.ValidatorInfo{ShardId: 2, PublicKey: []byte("bbbbbb"), List: string(common.EligibleList)} + _ = vals.SetValidatorsInShard(0, []state.ValidatorInfoHandler{val50, val51}) + _ = vals.SetValidatorsInShard(1, []state.ValidatorInfoHandler{val10, val11}) + _ = vals.SetValidatorsInShard(2, []state.ValidatorInfoHandler{val00, val01}) + err = stateExporter.exportNodesSetupJson(vals) require.Nil(t, err) diff --git a/update/mock/transactionCoordinatorMock.go b/update/mock/transactionCoordinatorMock.go index 07183d9467a..c0bb061a713 100644 --- a/update/mock/transactionCoordinatorMock.go +++ b/update/mock/transactionCoordinatorMock.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/processedMb" ) @@ -29,7 +30,7 @@ type TransactionCoordinatorMock struct { VerifyCreatedBlockTransactionsCalled func(hdr data.HeaderHandler, body *block.Body) error CreatePostProcessMiniBlocksCalled func() block.MiniBlockSlice VerifyCreatedMiniBlocksCalled func(hdr data.HeaderHandler, body *block.Body) error - AddIntermediateTransactionsCalled func(mapSCRs map[block.Type][]data.TransactionHandler) error + AddIntermediateTransactionsCalled func(mapSCRs map[block.Type][]data.TransactionHandler, key []byte) 
error GetAllIntermediateTxsCalled func() map[block.Type]map[string]data.TransactionHandler AddTxsFromMiniBlocksCalled func(miniBlocks block.MiniBlockSlice) AddTransactionsCalled func(txHandlers []data.TransactionHandler, blockType block.Type) @@ -204,12 +205,12 @@ func (tcm *TransactionCoordinatorMock) VerifyCreatedMiniBlocks(hdr data.HeaderHa } // AddIntermediateTransactions - -func (tcm *TransactionCoordinatorMock) AddIntermediateTransactions(mapSCRs map[block.Type][]data.TransactionHandler) error { +func (tcm *TransactionCoordinatorMock) AddIntermediateTransactions(mapSCRs map[block.Type][]data.TransactionHandler, key []byte) error { if tcm.AddIntermediateTransactionsCalled == nil { return nil } - return tcm.AddIntermediateTransactionsCalled(mapSCRs) + return tcm.AddIntermediateTransactionsCalled(mapSCRs, key) } // GetAllIntermediateTxs - diff --git a/vm/errors.go b/vm/errors.go index 341c26e49ad..f5c3638a624 100644 --- a/vm/errors.go +++ b/vm/errors.go @@ -31,9 +31,6 @@ var ErrInputCallerAddrIsNil = errors.New("input called address for system smart // ErrInputRecipientAddrIsNil signals that input recipient address for system smart contract is nil var ErrInputRecipientAddrIsNil = errors.New("input recipient address for system smart contract is nil") -// ErrInputAsyncParamsMissing signals that input does not contain async params -var ErrInputAsyncParamsMissing = errors.New("input does not contain async params") - // ErrNilBlockchainHook signals that blockchain hook is nil var ErrNilBlockchainHook = errors.New("blockchain hook is nil") @@ -267,3 +264,18 @@ var ErrWrongNewOwnerAddress = errors.New("wrong new owner address") // ErrInternalErrorWhileSettingNewOwner signals that an error occurred when setting the new contract owner var ErrInternalErrorWhileSettingNewOwner = errors.New("internal error when setting new contract owner") + +// ErrInvalidStakeLimitPercentage signals the invalid stake limit percentage was provided +var ErrInvalidStakeLimitPercentage = errors.New("invalid stake limit percentage") + +// ErrInvalidNodeLimitPercentage signals the invalid node limit percentage was provided +var ErrInvalidNodeLimitPercentage = errors.New("invalid node limit percentage") + +// ErrNilNodesCoordinator signals that nil nodes coordinator was provided +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") + +// ErrWaitingListDisabled signals that waiting list has been disabled, since staking v4 is active +var ErrWaitingListDisabled = errors.New("waiting list is disabled since staking v4 activation") + +// ErrCannotChangeToDynamic signals that tokenID cannot be change to type dynamic +var ErrCannotChangeToDynamic = errors.New("cannot change to dynamic because of duplicated roles") diff --git a/vm/factory/systemSCFactory.go b/vm/factory/systemSCFactory.go index 4d170119a6e..4ebd97a63b4 100644 --- a/vm/factory/systemSCFactory.go +++ b/vm/factory/systemSCFactory.go @@ -31,6 +31,7 @@ type systemSCFactory struct { addressPubKeyConverter core.PubkeyConverter shardCoordinator sharding.Coordinator enableEpochsHandler common.EnableEpochsHandler + nodesCoordinator vm.NodesCoordinator } // ArgsNewSystemSCFactory defines the arguments struct needed to create the system SCs @@ -46,6 +47,7 @@ type ArgsNewSystemSCFactory struct { AddressPubKeyConverter core.PubkeyConverter ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinator vm.NodesCoordinator } // NewSystemSCFactory creates a factory which will instantiate the system smart contracts @@ -80,6 
+82,9 @@ func NewSystemSCFactory(args ArgsNewSystemSCFactory) (*systemSCFactory, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, fmt.Errorf("%w in NewSystemSCFactory", vm.ErrNilEnableEpochsHandler) } + if check.IfNil(args.NodesCoordinator) { + return nil, fmt.Errorf("%w in NewSystemSCFactory", vm.ErrNilNodesCoordinator) + } scf := &systemSCFactory{ systemEI: args.SystemEI, @@ -92,6 +97,7 @@ func NewSystemSCFactory(args ArgsNewSystemSCFactory) (*systemSCFactory, error) { addressPubKeyConverter: args.AddressPubKeyConverter, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, + nodesCoordinator: args.NodesCoordinator, } err := scf.createGasConfig(args.GasSchedule.LatestGasSchedule()) @@ -197,6 +203,7 @@ func (scf *systemSCFactory) createValidatorContract() (vm.SystemSmartContract, e GovernanceSCAddress: vm.GovernanceSCAddress, ShardCoordinator: scf.shardCoordinator, EnableEpochsHandler: scf.enableEpochsHandler, + NodesCoordinator: scf.nodesCoordinator, } validatorSC, err := systemSmartContracts.NewValidatorSmartContract(args) return validatorSC, err diff --git a/vm/factory/systemSCFactory_test.go b/vm/factory/systemSCFactory_test.go index 381ad93a01d..3ab4a639a4a 100644 --- a/vm/factory/systemSCFactory_test.go +++ b/vm/factory/systemSCFactory_test.go @@ -66,6 +66,8 @@ func createMockNewSystemScFactoryArgs() ArgsNewSystemSCFactory { MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationSystemSCConfig: config.DelegationSystemSCConfig{ MinServiceFee: 0, @@ -76,10 +78,17 @@ func createMockNewSystemScFactoryArgs() ArgsNewSystemSCFactory { MinStakeAmount: "10", ConfigChangeAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + NodesCoordinator: &mock.NodesCoordinatorStub{}, } } @@ -94,6 +103,17 @@ func TestNewSystemSCFactory_NilSystemEI(t *testing.T) { assert.True(t, errors.Is(err, vm.ErrNilSystemEnvironmentInterface)) } +func TestNewSystemSCFactory_NilNodesCoordinator(t *testing.T) { + t.Parallel() + + arguments := createMockNewSystemScFactoryArgs() + arguments.NodesCoordinator = nil + scFactory, err := NewSystemSCFactory(arguments) + + assert.Nil(t, scFactory) + assert.True(t, errors.Is(err, vm.ErrNilNodesCoordinator)) +} + func TestNewSystemSCFactory_NilSigVerifier(t *testing.T) { t.Parallel() diff --git a/vm/interface.go b/vm/interface.go index 02d78643821..ca8332c742f 100644 --- a/vm/interface.go +++ b/vm/interface.go @@ -37,7 +37,7 @@ type SystemSCContainer interface { type SystemEI interface { ExecuteOnDestContext(destination []byte, sender []byte, value *big.Int, input []byte) (*vmcommon.VMOutput, error) DeploySystemSC(baseContract []byte, newAddress []byte, ownerAddress []byte, initFunction string, value *big.Int, input [][]byte) (vmcommon.ReturnCode, error) - Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) error + Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) SendGlobalSettingToAll(sender []byte, input []byte) GetBalance(addr []byte) *big.Int SetStorage(key []byte, value []byte) 
@@ -60,6 +60,7 @@ type SystemEI interface { GetLogs() []*vmcommon.LogEntry SetOwnerOperatingOnAccount(newOwner []byte) error UpdateCodeDeployerAddress(scAddress string, newOwner []byte) error + ProcessBuiltInFunction(sender, destination []byte, function string, arguments [][]byte) (*vmcommon.VMOutput, error) IsInterfaceNil() bool } @@ -70,6 +71,12 @@ type EconomicsHandler interface { IsInterfaceNil() bool } +// NodesCoordinator defines the methods needed about nodes in system SCs from nodes coordinator +type NodesCoordinator interface { + GetNumTotalEligible() uint64 + IsInterfaceNil() bool +} + // ContextHandler defines the methods needed to execute system smart contracts type ContextHandler interface { SystemEI @@ -129,4 +136,5 @@ type BlockchainHook interface { GetSnapshot() int RevertToSnapshot(snapshot int) error IsBuiltinFunctionName(functionName string) bool + ProcessBuiltInFunction(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) } diff --git a/vm/mock/nodesCoordinatorStub.go b/vm/mock/nodesCoordinatorStub.go new file mode 100644 index 00000000000..de4a99e28e7 --- /dev/null +++ b/vm/mock/nodesCoordinatorStub.go @@ -0,0 +1,19 @@ +package mock + +// NodesCoordinatorStub - +type NodesCoordinatorStub struct { + GetNumTotalEligibleCalled func() uint64 +} + +// GetNumTotalEligible - +func (n *NodesCoordinatorStub) GetNumTotalEligible() uint64 { + if n.GetNumTotalEligibleCalled != nil { + return n.GetNumTotalEligibleCalled() + } + return 1000 +} + +// IsInterfaceNil - +func (n *NodesCoordinatorStub) IsInterfaceNil() bool { + return n == nil +} diff --git a/vm/mock/systemEIStub.go b/vm/mock/systemEIStub.go index 4162a34ab24..0c300010316 100644 --- a/vm/mock/systemEIStub.go +++ b/vm/mock/systemEIStub.go @@ -10,7 +10,7 @@ import ( // SystemEIStub - type SystemEIStub struct { - TransferCalled func(destination []byte, sender []byte, value *big.Int, input []byte) error + TransferCalled func(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) GetBalanceCalled func(addr []byte) *big.Int SetStorageCalled func(key []byte, value []byte) AddReturnMessageCalled func(msg string) @@ -37,6 +37,7 @@ type SystemEIStub struct { GasLeftCalled func() uint64 CleanStorageUpdatesCalled func() ReturnMessage string + ProcessBuiltInFunctionCalled func(sender, destination []byte, function string, arguments [][]byte) (*vmcommon.VMOutput, error) AddLogEntryCalled func(entry *vmcommon.LogEntry) SetOwnerOperatingOnAccountCalled func(newOwner []byte) error UpdateCodeDeployerAddressCalled func(scAddress string, newOwner []byte) error @@ -203,11 +204,10 @@ func (s *SystemEIStub) SendGlobalSettingToAll(sender []byte, input []byte) { } // Transfer - -func (s *SystemEIStub) Transfer(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) error { +func (s *SystemEIStub) Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) { if s.TransferCalled != nil { - return s.TransferCalled(destination, sender, value, input) + s.TransferCalled(destination, sender, value, input, gasLimit) } - return nil } // GetBalance - @@ -310,6 +310,14 @@ func (s *SystemEIStub) UpdateCodeDeployerAddress(scAddress string, newOwner []by return nil } +// ProcessBuiltInFunction - +func (s *SystemEIStub) ProcessBuiltInFunction(sender, destination []byte, function string, arguments [][]byte) (*vmcommon.VMOutput, error) { + if s.ProcessBuiltInFunctionCalled != nil { + return s.ProcessBuiltInFunctionCalled(sender, destination, function, arguments) + } + return 
&vmcommon.VMOutput{}, nil +} + // IsInterfaceNil - func (s *SystemEIStub) IsInterfaceNil() bool { return s == nil diff --git a/vm/process/systemVM.go b/vm/process/systemVM.go index 6a3452304fa..ca0553cb714 100644 --- a/vm/process/systemVM.go +++ b/vm/process/systemVM.go @@ -6,9 +6,10 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/vm" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) type systemVM struct { @@ -18,6 +19,7 @@ type systemVM struct { asyncCallbackGasLock uint64 asyncCallStepCost uint64 mutGasLock sync.RWMutex + criticalSection sync.Mutex } // ArgsNewSystemVM defines the needed arguments to create a new system vm @@ -68,6 +70,9 @@ func NewSystemVM(args ArgsNewSystemVM) (*systemVM, error) { // RunSmartContractCreate creates and saves a new smart contract to the trie func (s *systemVM) RunSmartContractCreate(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + s.criticalSection.Lock() + defer s.criticalSection.Unlock() + if input == nil { return nil, vm.ErrInputArgsIsNil } @@ -101,6 +106,9 @@ func (s *systemVM) RunSmartContractCreate(input *vmcommon.ContractCreateInput) ( // RunSmartContractCall executes a smart contract according to the input func (s *systemVM) RunSmartContractCall(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + s.criticalSection.Lock() + defer s.criticalSection.Unlock() + s.systemEI.CleanCache() s.systemEI.SetSCAddress(input.RecipientAddr) s.systemEI.AddTxValueToSmartContract(input.CallValue, input.RecipientAddr) diff --git a/vm/systemSmartContracts/defaults/gasMap.go b/vm/systemSmartContracts/defaults/gasMap.go index 28ba8398f5a..cff4266547a 100644 --- a/vm/systemSmartContracts/defaults/gasMap.go +++ b/vm/systemSmartContracts/defaults/gasMap.go @@ -47,6 +47,11 @@ func FillGasMapBuiltInCosts(value uint64) map[string]uint64 { gasMap["ESDTNFTAddUri"] = value gasMap["ESDTNFTUpdateAttributes"] = value gasMap["ESDTNFTMultiTransfer"] = value + gasMap["ESDTModifyRoyalties"] = value + gasMap["ESDTModifyCreator"] = value + gasMap["ESDTNFTRecreate"] = value + gasMap["ESDTNFTUpdate"] = value + gasMap["ESDTNFTSetNewURIs"] = value gasMap["SetGuardian"] = value gasMap["GuardAccount"] = value gasMap["UnGuardAccount"] = value diff --git a/vm/systemSmartContracts/delegation.go b/vm/systemSmartContracts/delegation.go index d71afabb6e2..da23e0c8a15 100644 --- a/vm/systemSmartContracts/delegation.go +++ b/vm/systemSmartContracts/delegation.go @@ -1215,6 +1215,13 @@ func (d *delegation) stakeNodes(args *vmcommon.ContractCallInput) vmcommon.Retur return vmOutput.ReturnCode } + allLogs := d.eei.GetLogs() + tooManyNodesErrMsg := getTooManyNodesErrMsg(allLogs) + if len(tooManyNodesErrMsg) != 0 { + d.eei.AddReturnMessage(tooManyNodesErrMsg) + return vmcommon.UserError + } + err = d.updateDelegationStatusAfterStake(status, vmOutput.ReturnData, args.Arguments) if err != nil { d.eei.AddReturnMessage(err.Error()) @@ -1226,6 +1233,27 @@ func (d *delegation) stakeNodes(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.Ok } +func getTooManyNodesErrMsg(logEntries []*vmcommon.LogEntry) string { + for _, logEntry := range logEntries { + topics := logEntry.Topics + if len(topics) != 3 { + continue + } + if bytes.Equal(topics[0], 
[]byte(numberOfNodesTooHigh)) { + return formatTooManyNodesMsg(topics) + } + } + + return "" +} + +func formatTooManyNodesMsg(topics [][]byte) string { + numRegisteredBlsKeys := big.NewInt(0).SetBytes(topics[1]).Int64() + nodeLimit := big.NewInt(0).SetBytes(topics[2]).Int64() + return fmt.Sprintf("%s, num registered bls keys: %d, node limit: %d", + numberOfNodesTooHigh, numRegisteredBlsKeys, nodeLimit) +} + func (d *delegation) updateDelegationStatusAfterStake( status *DelegationContractStatus, returnData [][]byte, @@ -1430,11 +1458,7 @@ func (d *delegation) unJailNodes(args *vmcommon.ContractCallInput) vmcommon.Retu sendBackValue := getTransferBackFromVMOutput(vmOutput) if sendBackValue.Cmp(zero) > 0 { - err = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, sendBackValue, nil, 0) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, sendBackValue, nil, 0) } d.createAndAddLogEntry(args, args.Arguments...) @@ -1532,70 +1556,54 @@ func (d *delegation) finishDelegateUser( return vmcommon.UserError } - var err error - if len(delegator.ActiveFund) == 0 { - var fundKey []byte - fundKey, err = d.createAndSaveNextKeyFund(callerAddr, delegateValue, active) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - delegator.ActiveFund = fundKey - if isNew { - dStatus.NumUsers++ - } - } else { - err = d.addValueToFund(delegator.ActiveFund, delegateValue) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - } - - err = d.checkActiveFund(delegator) + err := d.addToActiveFund(callerAddr, delegator, delegateValue, dStatus, isNew) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - stakeArgs := d.makeStakeArgsIfAutomaticActivation(dConfig, dStatus, globalFund) - vmOutput, err := d.executeOnValidatorSC(scAddress, "stake", stakeArgs, callValue) + err = d.checkActiveFund(delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - if vmOutput.ReturnCode != vmcommon.Ok { - return vmOutput.ReturnCode - } - if len(stakeArgs) > 0 { - err = d.updateDelegationStatusAfterStake(dStatus, vmOutput.ReturnData, stakeArgs) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + returnCode := d.executeStakeAndUpdateStatus(dConfig, dStatus, globalFund, callValue, scAddress) + if returnCode != vmcommon.Ok { + return returnCode } - err = d.saveDelegationStatus(dStatus) + err = d.saveDelegatorData(callerAddr, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - err = d.saveGlobalFundData(globalFund) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError + return vmcommon.Ok +} + +func (d *delegation) addToActiveFund( + callerAddr []byte, + delegator *DelegatorData, + delegateValue *big.Int, + dStatus *DelegationContractStatus, + isNew bool, +) error { + if len(delegator.ActiveFund) > 0 { + return d.addValueToFund(delegator.ActiveFund, delegateValue) } - err = d.saveDelegatorData(callerAddr, delegator) + fundKey, err := d.createAndSaveNextKeyFund(callerAddr, delegateValue, active) if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError + return err } - return vmcommon.Ok + delegator.ActiveFund = fundKey + if isNew { + dStatus.NumUsers++ + } + + return nil } func (d *delegation) checkActiveFund(delegator *DelegatorData) error { @@ -1730,7 +1738,16 @@ func (d *delegation) 
unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - isNew, delegator, err := d.getOrCreateDelegatorData(args.CallerAddr) + return d.unDelegateValueFromAddress(args, valueToUnDelegate, args.CallerAddr, args.RecipientAddr) +} + +func (d *delegation) unDelegateValueFromAddress( + args *vmcommon.ContractCallInput, + valueToUnDelegate *big.Int, + delegatorAddress []byte, + contractAddress []byte, +) vmcommon.ReturnCode { + isNew, delegator, err := d.getOrCreateDelegatorData(delegatorAddress) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1763,12 +1780,13 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur d.eei.AddReturnMessage("invalid value to undelegate - need to undelegate all - do not leave dust behind") return vmcommon.UserError } - err = d.checkOwnerCanUnDelegate(args.CallerAddr, activeFund, valueToUnDelegate) + + err = d.checkOwnerCanUnDelegate(delegatorAddress, activeFund, valueToUnDelegate) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - err = d.computeAndUpdateRewards(args.CallerAddr, delegator) + err = d.computeAndUpdateRewards(delegatorAddress, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1780,7 +1798,7 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - returnData, returnCode := d.executeOnValidatorSCWithValueInArgs(args.RecipientAddr, "unStakeTokens", valueToUnDelegate) + returnData, returnCode := d.executeOnValidatorSCWithValueInArgs(contractAddress, "unStakeTokens", valueToUnDelegate) if returnCode != vmcommon.Ok { return returnCode } @@ -1798,7 +1816,7 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - err = d.addNewUnStakedFund(args.CallerAddr, delegator, actualUserUnStake) + err = d.addNewUnStakedFund(delegatorAddress, delegator, actualUserUnStake) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1822,7 +1840,7 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - err = d.saveDelegatorData(args.CallerAddr, delegator) + err = d.saveDelegatorData(delegatorAddress, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1971,11 +1989,31 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De isOwner := d.isOwner(callerAddress) + totalRewards, err := d.computeRewards(delegator.RewardsCheckpoint, isOwner, activeFund.Value) + if err != nil { + return err + } + delegator.UnClaimedRewards.Add(delegator.UnClaimedRewards, totalRewards) + delegator.RewardsCheckpoint = currentEpoch + 1 + + return nil +} + +func (d *delegation) computeRewards( + rewardsCheckpoint uint32, + isOwner bool, + activeValue *big.Int, +) (*big.Int, error) { totalRewards := big.NewInt(0) - for i := delegator.RewardsCheckpoint; i <= currentEpoch; i++ { + if activeValue.Cmp(zero) <= 0 { + return totalRewards, nil + } + + currentEpoch := d.eei.BlockChainHook().CurrentEpoch() + for i := rewardsCheckpoint; i <= currentEpoch; i++ { found, rewardData, errGet := d.getRewardComputationData(i) if errGet != nil { - return errGet + return nil, errGet } if !found { continue @@ -1999,7 +2037,7 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De rewardForDelegator := big.NewInt(0).Sub(rewardData.RewardsToDistribute, rewardsForOwner) // 
delegator reward is: rewardForDelegator * user stake / total active - rewardForDelegator.Mul(rewardForDelegator, activeFund.Value) + rewardForDelegator.Mul(rewardForDelegator, activeValue) rewardForDelegator.Div(rewardForDelegator, rewardData.TotalActive) if isOwner { @@ -2008,10 +2046,7 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De totalRewards.Add(totalRewards, rewardForDelegator) } - delegator.UnClaimedRewards.Add(delegator.UnClaimedRewards, totalRewards) - delegator.RewardsCheckpoint = currentEpoch + 1 - - return nil + return totalRewards, nil } func (d *delegation) claimRewards(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -2041,11 +2076,7 @@ func (d *delegation) claimRewards(args *vmcommon.ContractCallInput) vmcommon.Ret return vmcommon.UserError } - err = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, delegator.UnClaimedRewards, nil, 0) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, delegator.UnClaimedRewards, nil, 0) unclaimedRewardsBytes := delegator.UnClaimedRewards.Bytes() delegator.TotalCumulatedRewards.Add(delegator.TotalCumulatedRewards, delegator.UnClaimedRewards) @@ -2065,7 +2096,7 @@ func (d *delegation) claimRewards(args *vmcommon.ContractCallInput) vmcommon.Ret } } - d.createAndAddLogEntry(args, unclaimedRewardsBytes, boolToSlice(wasDeleted)) + d.createAndAddLogEntry(args, unclaimedRewardsBytes, boolToSlice(wasDeleted), args.RecipientAddr) return vmcommon.Ok } @@ -2112,6 +2143,7 @@ func (d *delegation) withdraw(args *vmcommon.ContractCallInput) vmcommon.ReturnC d.eei.AddReturnMessage(vm.ErrCallValueMustBeZero.Error()) return vmcommon.UserError } + err := d.eei.UseGas(d.gasCost.MetaChainSystemSCsCost.DelegationOps) if err != nil { d.eei.AddReturnMessage(err.Error()) @@ -2212,11 +2244,7 @@ func (d *delegation) withdraw(args *vmcommon.ContractCallInput) vmcommon.ReturnC return vmcommon.UserError } - err = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, actualUserUnBond, nil, 0) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, actualUserUnBond, nil, 0) var wasDeleted bool wasDeleted, err = d.deleteDelegatorOnWithdrawIfNeeded(args.CallerAddr, delegator) @@ -2294,7 +2322,8 @@ func (d *delegation) deleteDelegatorIfNeeded(address []byte, delegator *Delegato } func (d *delegation) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, d.endOfEpochAddr) { + if !bytes.Equal(args.CallerAddr, d.endOfEpochAddr) && + !bytes.Equal(args.CallerAddr, d.stakingSCAddr) { d.eei.AddReturnMessage("can be called by end of epoch address only") return vmcommon.UserError } @@ -2900,6 +2929,45 @@ func (d *delegation) addTokens(args *vmcommon.ContractCallInput) vmcommon.Return return vmcommon.Ok } +func (d *delegation) executeStakeAndUpdateStatus( + dConfig *DelegationConfig, + dStatus *DelegationContractStatus, + globalFund *GlobalFundData, + valueToStake *big.Int, + scAddress []byte, +) vmcommon.ReturnCode { + stakeArgs := d.makeStakeArgsIfAutomaticActivation(dConfig, dStatus, globalFund) + vmOutput, err := d.executeOnValidatorSC(scAddress, "stake", stakeArgs, valueToStake) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if vmOutput.ReturnCode != vmcommon.Ok { + return vmOutput.ReturnCode + } + + if len(stakeArgs) > 0 { + err = 
d.updateDelegationStatusAfterStake(dStatus, vmOutput.ReturnData, stakeArgs) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + } + + err = d.saveDelegationStatus(dStatus) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + err = d.saveGlobalFundData(globalFund) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + func (d *delegation) executeOnValidatorSC(address []byte, function string, args [][]byte, value *big.Int) (*vmcommon.VMOutput, error) { validatorCall := function for _, key := range args { @@ -2912,7 +2980,6 @@ func (d *delegation) executeOnValidatorSC(address []byte, function string, args } return vmOutput, nil - } func (d *delegation) getDelegationContractConfig() (*DelegationConfig, error) { diff --git a/vm/systemSmartContracts/delegationManager_test.go b/vm/systemSmartContracts/delegationManager_test.go index b683ac4331c..e2b4de77d8f 100644 --- a/vm/systemSmartContracts/delegationManager_test.go +++ b/vm/systemSmartContracts/delegationManager_test.go @@ -1171,7 +1171,7 @@ func TestDelegationManagerSystemSC_ClaimMultipleDelegationDuplicatedInput(t *tes GetCalled: func(key []byte) (vm.SystemSmartContract, error) { return &mock.SystemSCStub{ ExecuteCalled: func(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - _ = d.eei.Transfer(args.RecipientAddr, args.CallerAddr, big.NewInt(10), nil, 0) + d.eei.Transfer(args.RecipientAddr, args.CallerAddr, big.NewInt(10), nil, 0) return vmcommon.Ok }, }, nil @@ -1197,7 +1197,7 @@ func TestDelegationManagerSystemSC_ClaimMultipleDelegation(t *testing.T) { GetCalled: func(key []byte) (vm.SystemSmartContract, error) { return &mock.SystemSCStub{ ExecuteCalled: func(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - _ = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, big.NewInt(10), nil, 0) + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, big.NewInt(10), nil, 0) return vmcommon.Ok }, }, nil diff --git a/vm/systemSmartContracts/delegation_test.go b/vm/systemSmartContracts/delegation_test.go index 73cbab30716..fe93b1c8368 100644 --- a/vm/systemSmartContracts/delegation_test.go +++ b/vm/systemSmartContracts/delegation_test.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/mock" @@ -158,6 +159,14 @@ func createDelegationContractAndEEI() (*delegation, *vmContext) { args.DelegationSCConfig.MaxServiceFee = 10000 args.DelegationSCConfig.MinServiceFee = 0 d, _ := NewDelegationSystemSC(args) + + managementData := &DelegationManagement{ + MinDeposit: big.NewInt(10), + MinDelegationAmount: big.NewInt(10), + } + marshaledData, _ := d.marshalizer.Marshal(managementData) + eei.SetStorageForAddress(d.delegationMgrSCAddress, []byte(delegationManagementKey), marshaledData) + return d, eei } @@ -1615,9 +1624,16 @@ func TestDelegationSystemSC_ExecuteUnDelegateUserErrorsWhenGettingMinimumDelegat }) d.eei.SetStorage([]byte(lastFundKey), fundKey) + managementData := &DelegationManagement{ + MinDeposit: big.NewInt(50), + 
MinDelegationAmount: big.NewInt(50), + } + marshaledData, _ := d.marshalizer.Marshal(managementData) + eei.SetStorageForAddress(d.delegationMgrSCAddress, []byte(delegationManagementKey), marshaledData) + output := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, output) - assert.True(t, strings.Contains(eei.returnMessage, "error getting minimum delegation amount")) + assert.True(t, strings.Contains(eei.returnMessage, "invalid value to undelegate - need to undelegate all - do not leave dust behind")) } func TestDelegationSystemSC_ExecuteUnDelegateUserNotDelegatorOrNoActiveFundShouldErr(t *testing.T) { @@ -5028,3 +5044,139 @@ func TestDelegationSystemSC_SynchronizeOwner(t *testing.T) { eei.ResetReturnMessage() }) } + +func TestDelegationSystemSC_ExecuteAddNodesStakeNodesWithNodesLimit(t *testing.T) { + t.Parallel() + + sig := []byte("sig1") + args := createMockArgumentsForDelegation() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub( + common.StakingV4Step1Flag, + common.StakingV4Step2Flag, + common.StakingV4Step3Flag, + common.StakeLimitsFlag, + + common.DelegationSmartContractFlag, + common.StakingV2FlagAfterEpoch, + common.AddTokensToDelegationFlag, + common.DeleteDelegatorAfterClaimRewardsFlag, + common.ComputeRewardCheckpointFlag, + common.ValidatorToDelegationFlag, + common.ReDelegateBelowMinCheckFlag, + common.MultiClaimOnDelegationFlag, + ) + eei := createDefaultEei() + delegationsMap := map[string][]byte{} + delegationsMap[ownerKey] = []byte("owner") + eei.storageUpdate[string(eei.scAddress)] = delegationsMap + args.Eei = eei + + d, _ := NewDelegationSystemSC(args) + + blsKey1 := []byte("blsKey1") + blsKey2 := []byte("blsKey2") + key1 := &NodesData{ + BLSKey: blsKey1, + } + key2 := &NodesData{ + BLSKey: blsKey2, + } + dStatus := &DelegationContractStatus{ + StakedKeys: []*NodesData{key1, key2}, + } + _ = d.saveDelegationStatus(dStatus) + + globalFund := &GlobalFundData{ + TotalActive: big.NewInt(400), + } + _ = d.saveGlobalFundData(globalFund) + + addValidatorAndStakingScToVmContextWithBlsKeys(eei, [][]byte{blsKey1, blsKey2}) + + dStatus, _ = d.getDelegationStatus() + require.Equal(t, 2, len(dStatus.StakedKeys)) + require.Equal(t, 0, len(dStatus.UnStakedKeys)) + require.Equal(t, 0, len(dStatus.NotStakedKeys)) + + newBlsKey1 := []byte("newBlsKey1") + vmInput := getDefaultVmInputForFunc("addNodes", [][]byte{newBlsKey1, sig}) + output := d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, output) + + vmInput = getDefaultVmInputForFunc("stakeNodes", [][]byte{newBlsKey1}) + output = d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, output) + + dStatus, _ = d.getDelegationStatus() + require.Equal(t, 3, len(dStatus.StakedKeys)) + require.Equal(t, 0, len(dStatus.UnStakedKeys)) + require.Equal(t, 0, len(dStatus.NotStakedKeys)) + + addValidatorAndStakingScToVmContextWithBlsKeys(eei, [][]byte{blsKey1, blsKey2, newBlsKey1}) + + newBlsKey2 := []byte("newBlsKey2") + vmInput = getDefaultVmInputForFunc("addNodes", [][]byte{newBlsKey2, sig}) + output = d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, output) + + vmInput = getDefaultVmInputForFunc("stakeNodes", [][]byte{newBlsKey2}) + output = d.Execute(vmInput) + require.Equal(t, vmcommon.UserError, output) + require.True(t, strings.Contains(eei.returnMessage, numberOfNodesTooHigh)) + require.True(t, strings.Contains(eei.returnMessage, "num registered bls keys: 3")) + require.True(t, strings.Contains(eei.returnMessage, "node limit: 3")) + + dStatus, _ = d.getDelegationStatus() + require.Equal(t, 3, 
len(dStatus.StakedKeys)) + require.Equal(t, 0, len(dStatus.UnStakedKeys)) + require.Equal(t, 1, len(dStatus.NotStakedKeys)) +} + +func addValidatorAndStakingScToVmContextWithBlsKeys(eei *vmContext, blsKeys [][]byte) { + validatorArgs := createMockArgumentsForValidatorSC() + validatorArgs.StakingSCConfig.NodeLimitPercentage = 1 + validatorArgs.Eei = eei + validatorArgs.StakingSCConfig.GenesisNodePrice = "100" + validatorArgs.StakingSCAddress = vm.StakingSCAddress + validatorArgs.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetNumTotalEligibleCalled: func() uint64 { + return 3 + }, + } + validatorSc, _ := NewValidatorSmartContract(validatorArgs) + + stakingArgs := createMockStakingScArguments() + stakingArgs.Eei = eei + stakingSc, _ := NewStakingSmartContract(stakingArgs) + + _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { + if bytes.Equal(key, vm.StakingSCAddress) { + return stakingSc, nil + } + + if bytes.Equal(key, vm.ValidatorSCAddress) { + _ = validatorSc.saveRegistrationData([]byte("addr"), &ValidatorDataV2{ + RewardAddress: []byte("rewardAddr"), + TotalStakeValue: big.NewInt(1000), + LockedStake: big.NewInt(500), + BlsPubKeys: blsKeys, + TotalUnstaked: big.NewInt(150), + UnstakedInfo: []*UnstakedValue{ + { + UnstakedEpoch: 10, + UnstakedValue: big.NewInt(60), + }, + { + UnstakedEpoch: 50, + UnstakedValue: big.NewInt(80), + }, + }, + NumRegistered: uint32(len(blsKeys)), + }) + validatorSc.unBondPeriod = 50 + return validatorSc, nil + } + + return nil, nil + }}) +} diff --git a/vm/systemSmartContracts/eei.go b/vm/systemSmartContracts/eei.go index a2743693694..55f554d11b0 100644 --- a/vm/systemSmartContracts/eei.go +++ b/vm/systemSmartContracts/eei.go @@ -1,6 +1,7 @@ package systemSmartContracts import ( + "errors" "fmt" "math/big" @@ -75,6 +76,7 @@ func NewVMContext(args VMContextArgs) (*vmContext, error) { err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.MultiClaimOnDelegationFlag, common.SetSenderInEeiOutputTransferFlag, + common.AlwaysMergeContextsInEEIFlag, }) if err != nil { return nil, err @@ -217,6 +219,17 @@ func (host *vmContext) SendGlobalSettingToAll(_ []byte, input []byte) { } } +func (host *vmContext) transferValueOnly( + destination []byte, + sender []byte, + value *big.Int, +) { + senderAcc, destAcc := host.getSenderDestination(sender, destination) + + _ = senderAcc.BalanceDelta.Sub(senderAcc.BalanceDelta, value) + _ = destAcc.BalanceDelta.Add(destAcc.BalanceDelta, value) +} + func (host *vmContext) getSenderDestination(sender, destination []byte) (*vmcommon.OutputAccount, *vmcommon.OutputAccount) { senderAcc, exists := host.outputAccounts[string(sender)] if !exists { @@ -241,17 +254,6 @@ func (host *vmContext) getSenderDestination(sender, destination []byte) (*vmcomm return senderAcc, destAcc } -func (host *vmContext) transferValueOnly( - destination []byte, - sender []byte, - value *big.Int, -) { - senderAcc, destAcc := host.getSenderDestination(sender, destination) - - _ = senderAcc.BalanceDelta.Sub(senderAcc.BalanceDelta, value) - _ = destAcc.BalanceDelta.Add(destAcc.BalanceDelta, value) -} - // Transfer handles any necessary value transfer required and takes // the necessary steps to create accounts func (host *vmContext) Transfer( @@ -260,7 +262,7 @@ func (host *vmContext) Transfer( value *big.Int, input []byte, gasLimit uint64, -) error { +) { host.transferValueOnly(destination, sender, value) senderAcc, destAcc := 
host.getSenderDestination(sender, destination) outputTransfer := vmcommon.OutputTransfer{ @@ -275,8 +277,6 @@ func (host *vmContext) Transfer( outputTransfer.SenderAddress = senderAcc.Address } destAcc.OutputTransfers = append(destAcc.OutputTransfers, outputTransfer) - - return nil } // GetLogs returns the logs @@ -340,8 +340,11 @@ func (host *vmContext) properMergeContexts(parentContext *vmContext, returnCode host.scAddress = parentContext.scAddress host.AddReturnMessage(parentContext.returnMessage) - if returnCode != vmcommon.Ok { - // no need to merge - revert was done - transaction will fail + + // merge contexts if the return code is OK or the fix flag is activated because it was wrong not to merge them if the call failed + shouldMergeContexts := returnCode == vmcommon.Ok || host.enableEpochsHandler.IsFlagEnabled(common.AlwaysMergeContextsInEEIFlag) + if !shouldMergeContexts { + // backwards compatibility return } @@ -432,7 +435,8 @@ func createDirectCallInput( func (host *vmContext) transferBeforeInternalExec(callInput *vmcommon.ContractCallInput, sender []byte, callType string) error { if !host.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { - return host.Transfer(callInput.RecipientAddr, sender, callInput.CallValue, nil, 0) + host.Transfer(callInput.RecipientAddr, sender, callInput.CallValue, nil, 0) + return nil } host.transferValueOnly(callInput.RecipientAddr, sender, callInput.CallValue) @@ -530,6 +534,8 @@ func (host *vmContext) ExecuteOnDestContext(destination []byte, sender []byte, v vmOutput := &vmcommon.VMOutput{ReturnCode: vmcommon.UserError} currContext := host.copyToNewContext() defer func() { + // we need to reset here the output since it was already transferred in the vmOutput (host.CreateVMOutput() function) + // and we do not want to duplicate them host.output = make([][]byte, 0) host.properMergeContexts(currContext, vmOutput.ReturnCode) }() @@ -593,6 +599,42 @@ func (host *vmContext) AddLogEntry(entry *vmcommon.LogEntry) { host.logs = append(host.logs, entry) } +// ProcessBuiltInFunction will process the given built in function and will merge the generated output accounts and logs +func (host *vmContext) ProcessBuiltInFunction( + sender, destination []byte, + function string, + arguments [][]byte, +) (*vmcommon.VMOutput, error) { + vmInput := createDirectCallInput(destination, sender, big.NewInt(0), function, arguments) + vmInput.GasProvided = host.GasLeft() + vmOutput, err := host.blockChainHook.ProcessBuiltInFunction(vmInput) + if err != nil { + return nil, err + } + if vmOutput.ReturnCode != vmcommon.Ok { + return nil, errors.New(vmOutput.ReturnMessage) + } + + for address, outAcc := range vmOutput.OutputAccounts { + if len(outAcc.OutputTransfers) > 0 { + leftAccount, exist := host.outputAccounts[address] + if !exist { + leftAccount = &vmcommon.OutputAccount{ + Address: []byte(address), + } + host.outputAccounts[address] = leftAccount + } + leftAccount.OutputTransfers = append(leftAccount.OutputTransfers, outAcc.OutputTransfers...) 
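+ // Only the output transfers generated by the built-in function call are appended here;
+ // transfers already accumulated on this account in the current context are preserved, and
+ // the other VMOutput fields returned by the call (e.g. storage updates) are not copied
+ // into the system VM context by this merge.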
+ } + } + + for _, logEntry := range vmOutput.Logs { + host.AddLogEntry(logEntry) + } + + return vmOutput, nil +} + // BlockChainHook returns the blockchain hook func (host *vmContext) BlockChainHook() vm.BlockchainHook { return host.blockChainHook diff --git a/vm/systemSmartContracts/eei_test.go b/vm/systemSmartContracts/eei_test.go index 0d5df038a98..aa1120e452d 100644 --- a/vm/systemSmartContracts/eei_test.go +++ b/vm/systemSmartContracts/eei_test.go @@ -200,9 +200,7 @@ func TestVmContext_Transfer(t *testing.T) { value := big.NewInt(999) input := []byte("input") - err := vmCtx.Transfer(destination, sender, value, input, 0) - assert.Nil(t, err) - + vmCtx.Transfer(destination, sender, value, input, 0) balance := vmCtx.GetBalance(destination) assert.Equal(t, value.Uint64(), balance.Uint64()) diff --git a/vm/systemSmartContracts/esdt.go b/vm/systemSmartContracts/esdt.go index b353a052b42..99b29035aef 100644 --- a/vm/systemSmartContracts/esdt.go +++ b/vm/systemSmartContracts/esdt.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/vm" + logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -41,16 +42,16 @@ const canTransferNFTCreateRole = "canTransferNFTCreateRole" const upgradable = "canUpgrade" const canCreateMultiShard = "canCreateMultiShard" const upgradeProperties = "upgradeProperties" +const eGLD = "EGLD" const conversionBase = 10 -const metaESDT = "MetaESDT" type esdt struct { eei vm.SystemEI gasCost vm.GasCost baseIssuingCost *big.Int ownerAddress []byte // do not use this in functions. Should use e.getEsdtOwner() - eSDTSCAddress []byte + esdtSCAddress []byte endOfEpochSCAddress []byte marshalizer marshal.Marshalizer hasher hashing.Hasher @@ -100,6 +101,7 @@ func NewESDTSmartContract(args ArgsNewESDTSmartContract) (*esdt, error) { common.MetaESDTSetFlag, common.ESDTNFTCreateOnMultiShardFlag, common.NFTStopCreateFlag, + common.DynamicESDTFlag, }) if err != nil { return nil, err @@ -110,7 +112,6 @@ func NewESDTSmartContract(args ArgsNewESDTSmartContract) (*esdt, error) { if len(args.EndOfEpochSCAddress) == 0 { return nil, vm.ErrNilEndOfEpochSmartContractAddress } - baseIssuingCost, okConvert := big.NewInt(0).SetString(args.ESDTSCConfig.BaseIssuingCost, conversionBase) if !okConvert || baseIssuingCost.Cmp(big.NewInt(0)) < 0 { return nil, vm.ErrInvalidBaseIssuingCost @@ -123,7 +124,7 @@ func NewESDTSmartContract(args ArgsNewESDTSmartContract) (*esdt, error) { // we should have called pubkeyConverter.Decode here instead of a byte slice cast. 
Since that change would break // backwards compatibility, the fix was carried in the epochStart/metachain/systemSCs.go ownerAddress: []byte(args.ESDTSCConfig.OwnerAddress), - eSDTSCAddress: args.ESDTSCAddress, + esdtSCAddress: args.ESDTSCAddress, hasher: args.Hasher, marshalizer: args.Marshalizer, endOfEpochSCAddress: args.EndOfEpochSCAddress, @@ -215,6 +216,14 @@ func (e *esdt) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { return e.unsetBurnRoleGlobally(args) case "sendAllTransferRoleAddresses": return e.sendAllTransferRoleAddresses(args) + case "updateTokenID": + return e.updateTokenID(args) + case "registerDynamic": + return e.registerDynamic(args) + case "registerAndSetAllRolesDynamic": + return e.registerAndSetAllRolesDynamic(args) + case "changeToDynamic": + return e.changeToDynamic(args) } e.eei.AddReturnMessage("invalid method to call") @@ -317,11 +326,7 @@ func (e *esdt) issue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if initialSupply.Cmp(zero) > 0 { esdtTransferData := core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString(tokenIdentifier) + "@" + hex.EncodeToString(initialSupply.Bytes()) - err = e.eei.Transfer(args.CallerAddr, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.CallerAddr, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) } else { e.eei.Finish(tokenIdentifier) } @@ -346,6 +351,11 @@ func (e *esdt) registerNonFungible(args *vmcommon.ContractCallInput) vmcommon.Re return vmcommon.UserError } + tokenType := []byte(core.NonFungibleESDT) + if e.enableEpochsHandler.IsFlagEnabled(common.DynamicESDTFlag) { + tokenType = []byte(core.NonFungibleESDTv2) + } + tokenIdentifier, _, err := e.createNewToken( args.CallerAddr, args.Arguments[0], @@ -353,7 +363,7 @@ func (e *esdt) registerNonFungible(args *vmcommon.ContractCallInput) vmcommon.Re big.NewInt(0), 0, args.Arguments[2:], - []byte(core.NonFungibleESDT)) + tokenType) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -363,7 +373,7 @@ func (e *esdt) registerNonFungible(args *vmcommon.ContractCallInput) vmcommon.Re logEntry := &vmcommon.LogEntry{ Identifier: []byte(args.Function), Address: args.CallerAddr, - Topics: [][]byte{tokenIdentifier, args.Arguments[0], args.Arguments[1], []byte(core.NonFungibleESDT), big.NewInt(0).Bytes()}, + Topics: [][]byte{tokenIdentifier, args.Arguments[0], args.Arguments[1], tokenType, big.NewInt(0).Bytes()}, } e.eei.AddLogEntry(logEntry) @@ -436,7 +446,7 @@ func (e *esdt) registerMetaESDT(args *vmcommon.ContractCallInput) vmcommon.Retur big.NewInt(0), numOfDecimals, args.Arguments[3:], - []byte(metaESDT)) + []byte(core.MetaESDT)) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -447,14 +457,14 @@ func (e *esdt) registerMetaESDT(args *vmcommon.ContractCallInput) vmcommon.Retur logEntry := &vmcommon.LogEntry{ Identifier: []byte(args.Function), Address: args.CallerAddr, - Topics: [][]byte{tokenIdentifier, args.Arguments[0], args.Arguments[1], []byte(metaESDT), big.NewInt(int64(numOfDecimals)).Bytes()}, + Topics: [][]byte{tokenIdentifier, args.Arguments[0], args.Arguments[1], []byte(core.MetaESDT), big.NewInt(int64(numOfDecimals)).Bytes()}, } e.eei.AddLogEntry(logEntry) return vmcommon.Ok } -// arguments list: tokenName, tickerID prefix, type of token, numDecimals, numGlobalSettings, listGlobalSettings, list(address, special roles) +// arguments list: tokenName, tickerID 
prefix, type of token, numDecimals, numGlobalSettings, listGlobalSettings func (e *esdt) registerAndSetRoles(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !e.enableEpochsHandler.IsFlagEnabled(common.ESDTRegisterAndSetAllRolesFlag) { e.eei.AddReturnMessage("invalid method to call") @@ -469,7 +479,7 @@ func (e *esdt) registerAndSetRoles(args *vmcommon.ContractCallInput) vmcommon.Re e.eei.AddReturnMessage("arguments length mismatch") return vmcommon.UserError } - isWithDecimals, tokenType, err := getTokenType(args.Arguments[2]) + isWithDecimals, tokenType, err := e.getTokenType(args.Arguments[2]) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -502,7 +512,7 @@ func (e *esdt) registerAndSetRoles(args *vmcommon.ContractCallInput) vmcommon.Re return vmcommon.UserError } - allRoles, err := getAllRolesForTokenType(string(tokenType)) + allRoles, err := e.getAllRolesForTokenType(string(tokenType)) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -535,34 +545,81 @@ func (e *esdt) registerAndSetRoles(args *vmcommon.ContractCallInput) vmcommon.Re return vmcommon.Ok } -func getAllRolesForTokenType(tokenType string) ([][]byte, error) { +func (e *esdt) getAllRolesForTokenType(tokenType string) ([][]byte, error) { switch tokenType { - case core.NonFungibleESDT: - return [][]byte{[]byte(core.ESDTRoleNFTCreate), []byte(core.ESDTRoleNFTBurn), []byte(core.ESDTRoleNFTUpdateAttributes), []byte(core.ESDTRoleNFTAddURI)}, nil - case core.SemiFungibleESDT, metaESDT: + case core.NonFungibleESDT, core.NonFungibleESDTv2, core.DynamicNFTESDT: + nftRoles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleNFTBurn), + []byte(core.ESDTRoleNFTUpdateAttributes), + []byte(core.ESDTRoleNFTAddURI), + } + + if e.enableEpochsHandler.IsFlagEnabled(common.DynamicESDTFlag) { + nftRoles = append(nftRoles, [][]byte{ + []byte(core.ESDTRoleNFTRecreate), + []byte(core.ESDTRoleModifyCreator), + []byte(core.ESDTRoleModifyRoyalties), + []byte(core.ESDTRoleSetNewURI), + []byte(core.ESDTRoleNFTUpdate), + }...) + } + + return nftRoles, nil + case core.SemiFungibleESDT, core.MetaESDT: return [][]byte{[]byte(core.ESDTRoleNFTCreate), []byte(core.ESDTRoleNFTBurn), []byte(core.ESDTRoleNFTAddQuantity)}, nil case core.FungibleESDT: return [][]byte{[]byte(core.ESDTRoleLocalMint), []byte(core.ESDTRoleLocalBurn)}, nil + case core.DynamicSFTESDT, core.DynamicMetaESDT: + dynamicRoles := [][]byte{ + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleNFTBurn), + []byte(core.ESDTRoleNFTAddQuantity), + []byte(core.ESDTRoleNFTUpdateAttributes), + []byte(core.ESDTRoleNFTAddURI), + } + + dynamicRoles = append(dynamicRoles, [][]byte{ + []byte(core.ESDTRoleNFTRecreate), + []byte(core.ESDTRoleModifyCreator), + []byte(core.ESDTRoleModifyRoyalties), + []byte(core.ESDTRoleSetNewURI), + []byte(core.ESDTRoleNFTUpdate), + }...) 
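+
+ // Unlike the NFT branch above, the dynamic-specific roles are appended without checking
+ // common.DynamicESDTFlag: dynamic SFT/meta token types can only be created while the flag
+ // is enabled (registerDynamic and changeToDynamic are both flag-gated), so no extra check
+ // is needed for this case.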
+ + return dynamicRoles, nil } return nil, vm.ErrInvalidArgument } -func getTokenType(compressed []byte) (bool, []byte, error) { +func (e *esdt) getTokenType(compressed []byte) (bool, []byte, error) { // TODO: might extract the compressed constants to core, alongside metaESDT switch string(compressed) { case "NFT": + if e.enableEpochsHandler.IsFlagEnabled(common.DynamicESDTFlag) { + return false, []byte(core.NonFungibleESDTv2), nil + } return false, []byte(core.NonFungibleESDT), nil case "SFT": return false, []byte(core.SemiFungibleESDT), nil case "META": - return true, []byte(metaESDT), nil + return true, []byte(core.MetaESDT), nil case "FNG": return true, []byte(core.FungibleESDT), nil } return false, nil, vm.ErrInvalidArgument } +func getDynamicTokenType(tokenType []byte) []byte { + if bytes.Equal(tokenType, []byte(core.NonFungibleESDTv2)) || + bytes.Equal(tokenType, []byte(core.NonFungibleESDT)) { + return []byte(core.DynamicNFTESDT) + } + + return append([]byte(core.Dynamic), tokenType...) +} + func (e *esdt) changeSFTToMetaESDT(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !e.enableEpochsHandler.IsFlagEnabled(common.MetaESDTSetFlag) { e.eei.AddReturnMessage("invalid method to call") @@ -592,7 +649,7 @@ func (e *esdt) changeSFTToMetaESDT(args *vmcommon.ContractCallInput) vmcommon.Re return vmcommon.UserError } - token.TokenType = []byte(metaESDT) + token.TokenType = []byte(core.MetaESDT) token.NumDecimals = numOfDecimals err := e.saveToken(args.Arguments[0], token) if err != nil { @@ -603,10 +660,12 @@ func (e *esdt) changeSFTToMetaESDT(args *vmcommon.ContractCallInput) vmcommon.Re logEntry := &vmcommon.LogEntry{ Identifier: []byte(args.Function), Address: args.CallerAddr, - Topics: [][]byte{args.Arguments[0], token.TokenName, token.TickerName, []byte(metaESDT), args.Arguments[1]}, + Topics: [][]byte{args.Arguments[0], token.TokenName, token.TickerName, []byte(core.MetaESDT), args.Arguments[1]}, } e.eei.AddLogEntry(logEntry) + e.sendTokenTypeToSystemAccounts(args.CallerAddr, args.Arguments[0], token) + return vmcommon.Ok } @@ -642,6 +701,7 @@ func (e *esdt) createNewToken( Upgradable: true, CanAddSpecialRoles: true, } + err = e.upgradeProperties(tokenIdentifier, newESDTToken, properties, true, owner) if err != nil { return nil, nil, err @@ -653,6 +713,8 @@ func (e *esdt) createNewToken( return nil, nil, err } + e.sendTokenTypeToSystemAccounts(owner, tokenIdentifier, newESDTToken) + return tokenIdentifier, newESDTToken, nil } @@ -687,6 +749,11 @@ func isTokenNameHumanReadable(tokenName []byte) bool { } func (e *esdt) createNewTokenIdentifier(caller []byte, ticker []byte) ([]byte, error) { + if e.enableEpochsHandler.IsFlagEnabled(common.EGLDInESDTMultiTransferFlag) { + if bytes.Equal(ticker, []byte(eGLD)) { + return nil, vm.ErrCouldNotCreateNewTokenIdentifier + } + } newRandomBase := append(caller, e.eei.BlockChainHook().CurrentRandomSeed()...) 
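+ // The random part of the identifier is derived from hashing the caller address concatenated
+ // with the current random seed; the first tickerRandomSequenceLength bytes of the digest form
+ // the suffix appended to the ticker (identifiers typically look like TICKER-abcdef - shown
+ // here only as an illustrative example of the resulting format).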
newRandom := e.hasher.Compute(string(newRandomBase)) newRandomForTicker := newRandom[:tickerRandomSequenceLength] @@ -841,12 +908,7 @@ func (e *esdt) burn(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } if !token.Burnable { esdtTransferData := core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString(args.Arguments[0]) + "@" + hex.EncodeToString(args.Arguments[1]) - err = e.eei.Transfer(args.CallerAddr, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - + e.eei.Transfer(args.CallerAddr, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) e.eei.AddReturnMessage("token is not burnable") if e.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { return vmcommon.UserError @@ -918,11 +980,7 @@ func (e *esdt) mint(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } esdtTransferData := core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString(args.Arguments[0]) + "@" + hex.EncodeToString(mintValue.Bytes()) - err = e.eei.Transfer(destination, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(destination, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) return vmcommon.Ok } @@ -947,11 +1005,7 @@ func (e *esdt) toggleFreeze(args *vmcommon.ContractCallInput, builtInFunc string } esdtTransferData := builtInFunc + "@" + hex.EncodeToString(args.Arguments[0]) - err := e.eei.Transfer(args.Arguments[1], e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.Arguments[1], e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) return vmcommon.Ok } @@ -997,11 +1051,7 @@ func (e *esdt) toggleFreezeSingleNFT(args *vmcommon.ContractCallInput, builtInFu composedArg := append(args.Arguments[0], args.Arguments[1]...) 
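+ // composedArg is the tokenID concatenated with the NFT nonce bytes; it is hex-encoded below
+ // into the freeze/unfreeze built-in call data sent to the address given as the third argument.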
esdtTransferData := builtInFunc + "@" + hex.EncodeToString(composedArg) - err := e.eei.Transfer(args.Arguments[2], e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.Arguments[2], e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) return vmcommon.Ok } @@ -1027,14 +1077,10 @@ func (e *esdt) wipeTokenFromAddress( } esdtTransferData := core.BuiltInFunctionESDTWipe + "@" + hex.EncodeToString(wipeArgument) - err := e.eei.Transfer(address, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(address, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) token.NumWiped++ - err = e.saveToken(tokenID, token) + err := e.saveToken(tokenID, token) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1096,7 +1142,7 @@ func (e *esdt) togglePause(args *vmcommon.ContractCallInput, builtInFunc string) } esdtTransferData := builtInFunc + "@" + hex.EncodeToString(args.Arguments[0]) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) logEntry := &vmcommon.LogEntry{ Identifier: []byte(builtInFunc), @@ -1130,7 +1176,7 @@ func (e *esdt) saveTokenAndSendForAll(token *ESDTDataV2, tokenID []byte, builtIn } esdtTransferData := builtInCall + "@" + hex.EncodeToString(tokenID) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) return vmcommon.Ok } @@ -1199,7 +1245,7 @@ func (e *esdt) addBurnRoleAndSendToAllShards(token *ESDTDataV2, tokenID []byte) e.eei.AddLogEntry(logEntry) esdtTransferData := vmcommon.BuiltInFunctionESDTSetBurnRoleForAll + "@" + hex.EncodeToString(tokenID) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } func (e *esdt) configChange(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -1283,11 +1329,7 @@ func (e *esdt) claim(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } scBalance := e.eei.GetBalance(args.RecipientAddr) - err = e.eei.Transfer(args.CallerAddr, args.RecipientAddr, scBalance, nil, 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.CallerAddr, args.RecipientAddr, scBalance, nil, 0) return vmcommon.Ok } @@ -1363,9 +1405,11 @@ func (e *esdt) getSpecialRoles(args *vmcommon.ContractCallInput) vmcommon.Return rolesAsString = append(rolesAsString, string(role)) } - specialRoleAddress := e.addressPubKeyConverter.SilentEncode(specialRole.Address, log) - roles := strings.Join(rolesAsString, ",") + + specialRoleAddress, errEncode := e.addressPubKeyConverter.Encode(specialRole.Address) + e.treatEncodeErrorForGetSpecialRoles(errEncode, rolesAsString, specialRole.Address) + message := fmt.Sprintf("%s:%s", specialRoleAddress, roles) e.eei.Finish([]byte(message)) } @@ -1373,7 +1417,26 @@ func (e *esdt) getSpecialRoles(args *vmcommon.ContractCallInput) vmcommon.Return return vmcommon.Ok } -func (e *esdt) basicOwnershipChecks(args *vmcommon.ContractCallInput) (*ESDTDataV2, vmcommon.ReturnCode) { +func (e *esdt) treatEncodeErrorForGetSpecialRoles(err error, roles []string, address []byte) { + if err == nil { + return + } + + logLevel := logger.LogTrace + for _, role 
:= range roles { + if role != vmcommon.ESDTRoleBurnForAll { + logLevel = logger.LogWarning + break + } + } + + log.Log(logLevel, "esdt.treatEncodeErrorForGetSpecialRoles", + "hex specialRole.Address", hex.EncodeToString(address), + "roles", strings.Join(roles, ", "), + "error", err) +} + +func (e *esdt) getTokenInfoAfterInputChecks(args *vmcommon.ContractCallInput) (*ESDTDataV2, vmcommon.ReturnCode) { if args.CallValue.Cmp(zero) != 0 { e.eei.AddReturnMessage("callValue must be 0") return nil, vmcommon.OutOfFunds @@ -1392,6 +1455,15 @@ func (e *esdt) basicOwnershipChecks(args *vmcommon.ContractCallInput) (*ESDTData e.eei.AddReturnMessage(err.Error()) return nil, vmcommon.UserError } + + return token, vmcommon.Ok +} + +func (e *esdt) basicOwnershipChecks(args *vmcommon.ContractCallInput) (*ESDTDataV2, vmcommon.ReturnCode) { + token, returnCode := e.getTokenInfoAfterInputChecks(args) + if returnCode != vmcommon.Ok { + return nil, returnCode + } if !bytes.Equal(token.OwnerAddress, args.CallerAddr) { e.eei.AddReturnMessage("can be called by owner only") return nil, vmcommon.UserError @@ -1574,24 +1646,93 @@ func (e *esdt) isSpecialRoleValidForNonFungible(argument string) error { return nil } return vm.ErrInvalidArgument + case core.ESDTRoleSetNewURI: + if e.enableEpochsHandler.IsFlagEnabled(common.DynamicESDTFlag) { + return nil + } + return vm.ErrInvalidArgument + case core.ESDTRoleModifyCreator: + if e.enableEpochsHandler.IsFlagEnabled(common.DynamicESDTFlag) { + return nil + } + return vm.ErrInvalidArgument + case core.ESDTRoleModifyRoyalties: + if e.enableEpochsHandler.IsFlagEnabled(common.DynamicESDTFlag) { + return nil + } + return vm.ErrInvalidArgument + case core.ESDTRoleNFTRecreate: + if e.enableEpochsHandler.IsFlagEnabled(common.DynamicESDTFlag) { + return nil + } + return vm.ErrInvalidArgument + case core.ESDTRoleNFTUpdate: + if e.enableEpochsHandler.IsFlagEnabled(common.DynamicESDTFlag) { + return nil + } + return vm.ErrInvalidArgument default: return vm.ErrInvalidArgument } } +func (e *esdt) isSpecialRoleValidForDynamicNFT(argument string) error { + switch argument { + case core.ESDTRoleNFTBurn: + return nil + case core.ESDTRoleNFTCreate: + return nil + case core.ESDTRoleTransfer: + return nil + case core.ESDTRoleNFTUpdateAttributes: + return nil + case core.ESDTRoleNFTAddURI: + return nil + case core.ESDTRoleSetNewURI: + return nil + case core.ESDTRoleModifyCreator: + return nil + case core.ESDTRoleModifyRoyalties: + return nil + case core.ESDTRoleNFTRecreate: + return nil + case core.ESDTRoleNFTUpdate: + return nil + default: + return vm.ErrInvalidArgument + } +} + +func (e *esdt) isSpecialRoleValidForDynamicSFT(argument string) error { + err := e.isSpecialRoleValidForDynamicNFT(argument) + if err == nil { + return nil + } + + if argument == core.ESDTRoleNFTAddQuantity { + return nil + } + + return vm.ErrInvalidArgument +} + func (e *esdt) checkSpecialRolesAccordingToTokenType(args [][]byte, token *ESDTDataV2) error { switch string(token.TokenType) { case core.FungibleESDT: return validateRoles(args, e.isSpecialRoleValidForFungible) - case core.NonFungibleESDT: + case core.NonFungibleESDT, core.NonFungibleESDTv2: return validateRoles(args, e.isSpecialRoleValidForNonFungible) case core.SemiFungibleESDT: return validateRoles(args, e.isSpecialRoleValidForSemiFungible) - case metaESDT: + case core.MetaESDT: isCheckMetaESDTOnRolesFlagEnabled := e.enableEpochsHandler.IsFlagEnabled(common.ManagedCryptoAPIsFlag) if isCheckMetaESDTOnRolesFlagEnabled { return validateRoles(args, 
e.isSpecialRoleValidForSemiFungible) } + case core.DynamicNFTESDT: + return validateRoles(args, e.isSpecialRoleValidForDynamicNFT) + case core.DynamicSFTESDT, core.DynamicMetaESDT: + return validateRoles(args, e.isSpecialRoleValidForDynamicSFT) } return nil } @@ -1641,11 +1782,7 @@ func (e *esdt) changeToMultiShardCreate(args *vmcommon.ContractCallInput) vmcomm isAddressLastByteZero := addressWithCreateRole[len(addressWithCreateRole)-1] == 0 if !isAddressLastByteZero { multiCreateRoleOnly := [][]byte{[]byte(core.ESDTRoleNFTCreateMultiShard)} - err = e.sendRoleChangeData(args.Arguments[0], addressWithCreateRole, multiCreateRoleOnly, core.BuiltInFunctionSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.sendRoleChangeData(args.Arguments[0], addressWithCreateRole, multiCreateRoleOnly, core.BuiltInFunctionSetESDTRole) } err = e.saveToken(args.Arguments[0], token) @@ -1657,6 +1794,47 @@ func (e *esdt) changeToMultiShardCreate(args *vmcommon.ContractCallInput) vmcomm return vmcommon.Ok } +func isDynamicTokenType(tokenType []byte) bool { + prefixLength := len(core.Dynamic) + if len(tokenType) < prefixLength { + return false + } + + return bytes.Equal(tokenType[:prefixLength], []byte(core.Dynamic)) +} + +func rolesForDynamicWhichHasToBeSingular() []string { + return []string{ + core.ESDTRoleNFTCreate, + core.ESDTRoleNFTUpdateAttributes, + core.ESDTRoleNFTAddURI, + core.ESDTRoleSetNewURI, + core.ESDTRoleModifyCreator, + core.ESDTRoleModifyRoyalties, + core.ESDTRoleNFTRecreate, + core.ESDTRoleNFTUpdate, + } +} + +func (e *esdt) checkRolesForDynamicTokens( + token *ESDTDataV2, + roles [][]byte, +) vmcommon.ReturnCode { + if !isDynamicTokenType(token.TokenType) { + return vmcommon.Ok + } + + rolesWhichHasToBeSingular := rolesForDynamicWhichHasToBeSingular() + for _, role := range rolesWhichHasToBeSingular { + if checkIfDefinedRoleExistsInArgsAndToken(roles, token, []byte(role)) { + e.eei.AddReturnMessage(role + " already exists") + return vmcommon.UserError + } + } + + return vmcommon.Ok +} + func (e *esdt) setRolesForTokenAndAddress( token *ESDTDataV2, address []byte, @@ -1684,6 +1862,11 @@ func (e *esdt) setRolesForTokenAndAddress( return nil, vmcommon.UserError } + returnCode := e.checkRolesForDynamicTokens(token, roles) + if returnCode != vmcommon.Ok { + return nil, returnCode + } + transferRoleExists := checkIfDefinedRoleExistsInArgsAndToken(roles, token, []byte(core.ESDTRoleTransfer)) esdtRole, isNew := getRolesForAddress(token, address) @@ -1728,16 +1911,13 @@ func (e *esdt) prepareAndSendRoleChangeData( if properties.isMultiShardNFTCreateSet { allRoles = append(allRoles, []byte(core.ESDTRoleNFTCreateMultiShard)) } - err := e.sendRoleChangeData(tokenID, address, allRoles, core.BuiltInFunctionSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.sendRoleChangeData(tokenID, address, allRoles, core.BuiltInFunctionSetESDTRole) + isTransferRoleDefinedInArgs := isDefinedRoleInArgs(roles, []byte(core.ESDTRoleTransfer)) - firstTransferRoleSet := !properties.transferRoleExists && isTransferRoleDefinedInArgs + firstTransferRoleSet := !properties.transferRoleExists && isDefinedRoleInArgs(roles, []byte(core.ESDTRoleTransfer)) if firstTransferRoleSet { esdtTransferData := core.BuiltInFunctionESDTSetLimitedTransfer + "@" + hex.EncodeToString(tokenID) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } if 
isTransferRoleDefinedInArgs { @@ -1840,12 +2020,7 @@ func (e *esdt) unSetSpecialRole(args *vmcommon.ContractCallInput) vmcommon.Retur esdtRole.Roles = esdtRole.Roles[:len(esdtRole.Roles)-1] } - err := e.sendRoleChangeData(args.Arguments[0], args.Arguments[1], args.Arguments[2:], core.BuiltInFunctionUnSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - + e.sendRoleChangeData(args.Arguments[0], args.Arguments[1], args.Arguments[2:], core.BuiltInFunctionUnSetESDTRole) if len(esdtRole.Roles) == 0 { for i, roles := range token.SpecialRoles { if bytes.Equal(roles.Address, address) { @@ -1863,14 +2038,14 @@ func (e *esdt) unSetSpecialRole(args *vmcommon.ContractCallInput) vmcommon.Retur lastTransferRoleWasDeleted := isTransferRoleInArgs && !transferRoleExists if lastTransferRoleWasDeleted { esdtTransferData := core.BuiltInFunctionESDTUnSetLimitedTransfer + "@" + hex.EncodeToString(args.Arguments[0]) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } if isTransferRoleInArgs { e.deleteTransferRoleAddressFromSystemAccount(args.Arguments[0], address) } - err = e.saveToken(args.Arguments[0], token) + err := e.saveToken(args.Arguments[0], token) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1886,7 +2061,7 @@ func (e *esdt) sendNewTransferRoleAddressToSystemAccount(token []byte, address [ } esdtTransferData := vmcommon.BuiltInFunctionESDTTransferRoleAddAddress + "@" + hex.EncodeToString(token) + "@" + hex.EncodeToString(address) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } func (e *esdt) deleteTransferRoleAddressFromSystemAccount(token []byte, address []byte) { @@ -1896,7 +2071,7 @@ func (e *esdt) deleteTransferRoleAddressFromSystemAccount(token []byte, address } esdtTransferData := vmcommon.BuiltInFunctionESDTTransferRoleDeleteAddress + "@" + hex.EncodeToString(token) + "@" + hex.EncodeToString(address) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } func (e *esdt) sendAllTransferRoleAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -1932,7 +2107,7 @@ func (e *esdt) sendAllTransferRoleAddresses(args *vmcommon.ContractCallInput) vm return vmcommon.UserError } - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) return vmcommon.Ok } @@ -2022,11 +2197,7 @@ func (e *esdt) transferNFTCreateRole(args *vmcommon.ContractCallInput) vmcommon. 
esdtTransferNFTCreateData := core.BuiltInFunctionESDTNFTCreateRoleTransfer + "@" + hex.EncodeToString(args.Arguments[0]) + "@" + hex.EncodeToString(args.Arguments[2]) - err = e.eei.Transfer(args.Arguments[1], e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferNFTCreateData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.Arguments[1], e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferNFTCreateData), 0) return vmcommon.Ok } @@ -2064,24 +2235,260 @@ func (e *esdt) stopNFTCreateForever(args *vmcommon.ContractCallInput) vmcommon.R } for _, currentOwner := range currentOwners { - err = e.sendRoleChangeData(args.Arguments[0], currentOwner, [][]byte{[]byte(core.ESDTRoleNFTCreate)}, core.BuiltInFunctionUnSetESDTRole) + e.sendRoleChangeData(args.Arguments[0], currentOwner, [][]byte{[]byte(core.ESDTRoleNFTCreate)}, core.BuiltInFunctionUnSetESDTRole) + } + + return vmcommon.Ok +} + +func (e *esdt) updateTokenID(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !e.enableEpochsHandler.IsFlagEnabled(common.DynamicESDTFlag) { + e.eei.AddReturnMessage("invalid method to call") + return vmcommon.FunctionNotFound + } + if len(args.Arguments) != 1 { + e.eei.AddReturnMessage("invalid number of arguments, wanted 1") + return vmcommon.FunctionWrongSignature + } + token, returnCode := e.getTokenInfoAfterInputChecks(args) + if returnCode != vmcommon.Ok { + return returnCode + } + + tokenID := args.Arguments[0] + if bytes.Equal(token.TokenType, []byte(core.NonFungibleESDT)) { + token.TokenType = []byte(core.NonFungibleESDTv2) + err := e.saveToken(tokenID, token) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } } + // TODO allow this to be called only once + e.sendTokenTypeToSystemAccounts(args.CallerAddr, args.Arguments[0], token) + + return vmcommon.Ok +} + +func (e *esdt) createDynamicToken(args *vmcommon.ContractCallInput) ([]byte, *ESDTDataV2, vmcommon.ReturnCode) { + if !e.enableEpochsHandler.IsFlagEnabled(common.DynamicESDTFlag) { + e.eei.AddReturnMessage("invalid method to call") + return nil, nil, vmcommon.FunctionNotFound + } + returnCode := e.checkBasicCreateArguments(args) + if returnCode != vmcommon.Ok { + return nil, nil, returnCode + } + if len(args.Arguments) < 3 { + e.eei.AddReturnMessage("not enough arguments") + return nil, nil, vmcommon.UserError + } + + isWithDecimals, tokenType, err := e.getTokenType(args.Arguments[2]) + if err != nil { + e.eei.AddReturnMessage(err.Error()) + return nil, nil, vmcommon.UserError + } + + if isNotAllowedToCreateDynamicToken(tokenType) { + e.eei.AddReturnMessage(fmt.Sprintf("cannot create %s tokens as dynamic", tokenType)) + return nil, nil, vmcommon.UserError + } + + propertiesStart := 3 + numOfDecimals := uint32(0) + if isWithDecimals { + propertiesStart++ + if len(args.Arguments) < propertiesStart { + e.eei.AddReturnMessage("not enough arguments") + return nil, nil, vmcommon.UserError + } + + numOfDecimals = uint32(big.NewInt(0).SetBytes(args.Arguments[3]).Uint64()) + if numOfDecimals < minNumberOfDecimals || numOfDecimals > maxNumberOfDecimals { + e.eei.AddReturnMessage(fmt.Errorf("%w, minimum: %d, maximum: %d, provided: %d", + vm.ErrInvalidNumberOfDecimals, + minNumberOfDecimals, + maxNumberOfDecimals, + numOfDecimals, + ).Error()) + return nil, nil, vmcommon.UserError + } + } + + dynamicTokenType := getDynamicTokenType(tokenType) + + tokenIdentifier, token, err := e.createNewToken( + args.CallerAddr, + args.Arguments[0], + args.Arguments[1], + 
big.NewInt(0), + numOfDecimals, + args.Arguments[propertiesStart:], + dynamicTokenType) + if err != nil { + e.eei.AddReturnMessage(err.Error()) + return nil, nil, vmcommon.UserError + } + + logEntry := &vmcommon.LogEntry{ + Identifier: []byte(args.Function), + Address: args.CallerAddr, + Topics: [][]byte{tokenIdentifier, args.Arguments[0], args.Arguments[1], dynamicTokenType, big.NewInt(int64(numOfDecimals)).Bytes()}, + } + e.eei.Finish(tokenIdentifier) + e.eei.AddLogEntry(logEntry) + + return tokenIdentifier, token, vmcommon.Ok +} + +func (e *esdt) registerDynamic(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + _, _, returnCode := e.createDynamicToken(args) + return returnCode +} + +func (e *esdt) registerAndSetAllRolesDynamic(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + tokenIdentifier, token, returnCode := e.createDynamicToken(args) + if returnCode != vmcommon.Ok { + return returnCode + } + + allRoles, err := e.getAllRolesForTokenType(string(token.TokenType)) + if err != nil { + e.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + properties, returnCode := e.setRolesForTokenAndAddress(token, args.CallerAddr, allRoles) + if returnCode != vmcommon.Ok { + return returnCode + } + + returnCode = e.prepareAndSendRoleChangeData(tokenIdentifier, args.CallerAddr, allRoles, properties) + if returnCode != vmcommon.Ok { + return returnCode + } + + err = e.saveToken(tokenIdentifier, token) + if err != nil { + e.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (e *esdt) checkRolesAreCompatibleToChangeToDynamic(token *ESDTDataV2) error { + mapOfRoles := make(map[string]uint32) + + for _, esdtRole := range token.SpecialRoles { + for _, role := range esdtRole.Roles { + mapOfRoles[string(role)]++ + } + } + + rolesWithHaveToBeSingular := rolesForDynamicWhichHasToBeSingular() + for _, role := range rolesWithHaveToBeSingular { + if mapOfRoles[role] > 1 { + return fmt.Errorf("%w, role %s was found multiple times", vm.ErrCannotChangeToDynamic, role) + } + } + + return nil +} + +func (e *esdt) changeToDynamic(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !e.enableEpochsHandler.IsFlagEnabled(common.DynamicESDTFlag) { + e.eei.AddReturnMessage("invalid method to call") + return vmcommon.FunctionNotFound + } + + token, returnCode := e.basicOwnershipChecks(args) + if returnCode != vmcommon.Ok { + return returnCode + } + + if isNotAllowed(token.TokenType) { + e.eei.AddReturnMessage(fmt.Sprintf("cannot change %s tokens to dynamic", token.TokenType)) + return vmcommon.UserError + } + if isDynamicTokenType(token.TokenType) { + e.eei.AddReturnMessage("tokenID is already dynamic") + return vmcommon.UserError + } + + err := e.checkRolesAreCompatibleToChangeToDynamic(token) + if err != nil { + e.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + token.TokenType = append([]byte(core.Dynamic), token.TokenType...) 
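+ // Only semi-fungible and metaESDT tokens can reach this point (fungible and both non-fungible
+ // variants are rejected by isNotAllowed above, and already-dynamic types were filtered out),
+ // so prefixing with core.Dynamic yields the corresponding dynamic token type directly,
+ // consistent with getDynamicTokenType.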
+ + err = e.saveToken(args.Arguments[0], token) + if err != nil { + e.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + logEntry := &vmcommon.LogEntry{ + Identifier: []byte(args.Function), + Address: args.CallerAddr, + Topics: [][]byte{args.Arguments[0], token.TokenName, token.TickerName, token.TokenType}, + } + e.eei.AddLogEntry(logEntry) + + e.sendTokenTypeToSystemAccounts(args.CallerAddr, args.Arguments[0], token) + return vmcommon.Ok } -func (e *esdt) sendRoleChangeData(tokenID []byte, destination []byte, roles [][]byte, builtInFunc string) error { +func isNotAllowed(tokenType []byte) bool { + notAllowedTypes := [][]byte{ + []byte(core.FungibleESDT), + []byte(core.NonFungibleESDT), + []byte(core.NonFungibleESDTv2), + } + + for _, notAllowedType := range notAllowedTypes { + if bytes.Equal(tokenType, notAllowedType) { + return true + } + } + + return false +} + +func isNotAllowedToCreateDynamicToken(tokenType []byte) bool { + return bytes.Equal(tokenType, []byte(core.FungibleESDT)) +} + +func (e *esdt) sendTokenTypeToSystemAccounts(caller []byte, tokenID []byte, token *ESDTDataV2) { + if !e.enableEpochsHandler.IsFlagEnabled(common.DynamicESDTFlag) { + return + } + + builtInFunc := core.ESDTSetTokenType + esdtTransferData := builtInFunc + "@" + hex.EncodeToString(tokenID) + "@" + hex.EncodeToString(token.TokenType) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) + + logEntry := &vmcommon.LogEntry{ + Identifier: []byte(builtInFunc), + Address: caller, + Topics: [][]byte{tokenID, token.TokenType}, + Data: nil, + } + e.eei.AddLogEntry(logEntry) +} + +func (e *esdt) sendRoleChangeData(tokenID []byte, destination []byte, roles [][]byte, builtInFunc string) { esdtSetRoleData := builtInFunc + "@" + hex.EncodeToString(tokenID) for _, arg := range roles { esdtSetRoleData += "@" + hex.EncodeToString(arg) } - err := e.eei.Transfer(destination, e.eSDTSCAddress, big.NewInt(0), []byte(esdtSetRoleData), 0) - return err + e.eei.Transfer(destination, e.esdtSCAddress, big.NewInt(0), []byte(esdtSetRoleData), 0) } func (e *esdt) getAllAddressesAndRoles(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { diff --git a/vm/systemSmartContracts/esdt_test.go b/vm/systemSmartContracts/esdt_test.go index 050d5cc452d..81486e3fba6 100644 --- a/vm/systemSmartContracts/esdt_test.go +++ b/vm/systemSmartContracts/esdt_test.go @@ -794,7 +794,7 @@ func TestEsdt_ExecuteMintInvalidDestinationAddressShouldFail(t *testing.T) { assert.True(t, strings.Contains(eei.returnMessage, "destination address of invalid length")) } -func TestEsdt_ExecuteMintTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteMintTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -807,9 +807,6 @@ func TestEsdt_ExecuteMintTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -818,7 +815,7 @@ func TestEsdt_ExecuteMintTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("mint", [][]byte{[]byte("esdtToken"), {200}}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } func TestEsdt_ExecuteMintWithTwoArgsShouldSetOwnerAsDestination(t *testing.T) { @@ -1080,7 +1077,7 @@ func 
TestEsdt_ExecuteToggleFreezeNonFreezableTokenShouldFail(t *testing.T) { assert.True(t, strings.Contains(eei.returnMessage, "cannot freeze")) } -func TestEsdt_ExecuteToggleFreezeTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteToggleFreezeTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1092,9 +1089,6 @@ func TestEsdt_ExecuteToggleFreezeTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1103,10 +1097,10 @@ func TestEsdt_ExecuteToggleFreezeTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("freeze", [][]byte{[]byte("esdtToken"), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } -func TestEsdt_ExecuteToggleFreezeSingleNFTTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteToggleFreezeSingleNFTTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1119,9 +1113,6 @@ func TestEsdt_ExecuteToggleFreezeSingleNFTTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1130,7 +1121,7 @@ func TestEsdt_ExecuteToggleFreezeSingleNFTTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("freezeSingleNFT", [][]byte{[]byte("esdtToken"), big.NewInt(10).Bytes(), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } func TestEsdt_ExecuteToggleFreezeShouldWorkWithRealBech32Address(t *testing.T) { @@ -1566,7 +1557,7 @@ func TestEsdt_ExecuteWipeInvalidDestShouldFail(t *testing.T) { assert.True(t, strings.Contains(eei.returnMessage, "invalid")) } -func TestEsdt_ExecuteWipeTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteWipeTransferFailsNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1579,9 +1570,6 @@ func TestEsdt_ExecuteWipeTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1590,10 +1578,10 @@ func TestEsdt_ExecuteWipeTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("wipe", [][]byte{[]byte("esdtToken"), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } -func TestEsdt_ExecuteWipeSingleNFTTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteWipeSingleNFTTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1606,9 +1594,6 @@ func TestEsdt_ExecuteWipeSingleNFTTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, 
err.Error(), msg) } @@ -1617,7 +1602,7 @@ func TestEsdt_ExecuteWipeSingleNFTTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("wipeSingleNFT", [][]byte{[]byte("esdtToken"), big.NewInt(10).Bytes(), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } func TestEsdt_ExecuteWipeShouldWork(t *testing.T) { @@ -2559,6 +2544,63 @@ func TestEsdt_GetSpecialRolesShouldWork(t *testing.T) { assert.Equal(t, []byte("erd1e7n8rzxdtl2n2fl6mrsg4l7stp2elxhfy6l9p7eeafspjhhrjq7qk05usw:ESDTRoleNFTAddQuantity,ESDTRoleNFTCreate,ESDTRoleNFTBurn"), eei.output[1]) } +func TestEsdt_GetSpecialRolesWithEmptyAddressShouldWork(t *testing.T) { + t.Parallel() + + tokenName := []byte("esdtToken") + args := createMockArgumentsForESDT() + eei := createDefaultEei() + args.Eei = eei + + addr := "" + addrBytes, _ := testscommon.RealWorldBech32PubkeyConverter.Decode(addr) + + specialRoles := []*ESDTRoles{ + { + Address: addrBytes, + Roles: [][]byte{ + []byte(core.ESDTRoleLocalMint), + []byte(core.ESDTRoleLocalBurn), + }, + }, + { + Address: addrBytes, + Roles: [][]byte{ + []byte(core.ESDTRoleNFTAddQuantity), + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleNFTBurn), + }, + }, + { + Address: addrBytes, + Roles: [][]byte{ + []byte(vmcommon.ESDTRoleBurnForAll), + }, + }, + } + tokensMap := map[string][]byte{} + marshalizedData, _ := args.Marshalizer.Marshal(ESDTDataV2{ + SpecialRoles: specialRoles, + }) + tokensMap[string(tokenName)] = marshalizedData + eei.storageUpdate[string(eei.scAddress)] = tokensMap + args.Eei = eei + + args.AddressPubKeyConverter = testscommon.RealWorldBech32PubkeyConverter + + e, _ := NewESDTSmartContract(args) + + eei.output = make([][]byte, 0) + vmInput := getDefaultVmInputForFunc("getSpecialRoles", [][]byte{[]byte("esdtToken")}) + output := e.Execute(vmInput) + assert.Equal(t, vmcommon.Ok, output) + + assert.Equal(t, 3, len(eei.output)) + assert.Equal(t, []byte(":ESDTRoleLocalMint,ESDTRoleLocalBurn"), eei.output[0]) + assert.Equal(t, []byte(":ESDTRoleNFTAddQuantity,ESDTRoleNFTCreate,ESDTRoleNFTBurn"), eei.output[1]) + assert.Equal(t, []byte(":ESDTRoleBurnForAll"), eei.output[2]) +} + func TestEsdt_UnsetSpecialRoleWithRemoveEntryFromSpecialRoles(t *testing.T) { t.Parallel() @@ -2760,7 +2802,6 @@ func TestEsdt_SetSpecialRoleCheckBasicOwnershipErr(t *testing.T) { func TestEsdt_SetSpecialRoleNewSendRoleChangeDataErr(t *testing.T) { t.Parallel() - localErr := errors.New("local err") args := createMockArgumentsForESDT() eei := &mock.SystemEIStub{ GetStorageCalled: func(key []byte) []byte { @@ -2770,9 +2811,8 @@ func TestEsdt_SetSpecialRoleNewSendRoleChangeDataErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) - return localErr }, } args.Eei = eei @@ -2807,9 +2847,8 @@ func TestEsdt_SetSpecialRoleAlreadyExists(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), 
input) - return nil }, } args.Eei = eei @@ -2846,11 +2885,10 @@ func TestEsdt_SetSpecialRoleCannotSaveToken(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) castedMarshalizer := args.Marshalizer.(*mock.MarshalizerMock) castedMarshalizer.Fail = true - return nil }, } args.Eei = eei @@ -2887,9 +2925,8 @@ func TestEsdt_SetSpecialRoleShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -2931,9 +2968,8 @@ func TestEsdt_SetSpecialRoleNFTShouldErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654e4654437265617465"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -3238,9 +3274,8 @@ func TestEsdt_SetSpecialRoleSFTShouldErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654e46544164645175616e74697479"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -3509,10 +3544,9 @@ func TestEsdt_UnsetSpecialRoleCannotRemoveRoleNotExistsShouldErr(t *testing.T) { require.Equal(t, vmcommon.UserError, retCode) } -func TestEsdt_UnsetSpecialRoleRemoveRoleTransferErr(t *testing.T) { +func TestEsdt_UnsetSpecialRoleRemoveRoleTransfer(t *testing.T) { t.Parallel() - localErr := errors.New("local err") args := createMockArgumentsForESDT() eei := &mock.SystemEIStub{ GetStorageCalled: func(key []byte) []byte { @@ -3528,9 +3562,8 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleTransferErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4d696e74"), input) - return localErr }, } args.Eei = eei @@ -3544,7 +3577,7 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleTransferErr(t *testing.T) { vmInput.GasProvided = 50000000 retCode := e.Execute(vmInput) - require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vmcommon.Ok, retCode) } func TestEsdt_UnsetSpecialRoleRemoveRoleSaveTokenErr(t *testing.T) { @@ -3565,11 +3598,10 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleSaveTokenErr(t 
*testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4d696e74"), input) castedMarshalizer := args.Marshalizer.(*mock.MarshalizerMock) castedMarshalizer.Fail = true - return nil }, } args.Eei = eei @@ -3604,9 +3636,8 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4d696e74"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -3713,9 +3744,8 @@ func TestEsdt_StopNFTCreateForeverCallShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@746f6b656e4944@45534454526f6c654e4654437265617465"), input) - return nil }, } args.Eei = eei @@ -3825,10 +3855,9 @@ func TestEsdt_TransferNFTCreateCallShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTNFTCreateRoleTransfer@746f6b656e4944@63616c6c657232"), input) require.Equal(t, destination, []byte("caller3")) - return nil }, } args.Eei = eei @@ -3863,11 +3892,6 @@ func TestEsdt_TransferNFTCreateCallMultiShardShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { - require.Equal(t, []byte("ESDTNFTCreateRoleTransfer@746f6b656e4944@3263616c6c6572"), input) - require.Equal(t, destination, []byte("3caller")) - return nil - }, } args.Eei = eei @@ -4068,7 +4092,7 @@ func TestEsdt_ExecuteChangeSFTToMetaESDT(t *testing.T) { token, _ := e.getExistingToken(vmInput.Arguments[0]) assert.Equal(t, token.NumDecimals, uint32(10)) - assert.Equal(t, token.TokenType, []byte(metaESDT)) + assert.Equal(t, token.TokenType, []byte(core.MetaESDT)) } func TestEsdt_ExecuteIssueSFTAndChangeSFTToMetaESDT(t *testing.T) { @@ -4102,7 +4126,7 @@ func TestEsdt_ExecuteIssueSFTAndChangeSFTToMetaESDT(t *testing.T) { token, _ = e.getExistingToken(fullTicker) assert.Equal(t, token.NumDecimals, uint32(10)) - assert.Equal(t, token.TokenType, []byte(metaESDT)) + assert.Equal(t, token.TokenType, []byte(core.MetaESDT)) output = e.Execute(vmInput) assert.Equal(t, vmcommon.UserError, output) @@ -4236,7 +4260,7 @@ func TestEsdt_ExecuteRegisterAndSetMetaESDTShouldSetType(t *testing.T) { registerAndSetAllRolesWithTypeCheck(t, []byte("NFT"), []byte(core.NonFungibleESDT)) registerAndSetAllRolesWithTypeCheck(t, []byte("SFT"), []byte(core.SemiFungibleESDT)) - registerAndSetAllRolesWithTypeCheck(t, []byte("META"), 
[]byte(metaESDT)) + registerAndSetAllRolesWithTypeCheck(t, []byte("META"), []byte(core.MetaESDT)) registerAndSetAllRolesWithTypeCheck(t, []byte("FNG"), []byte(core.FungibleESDT)) } @@ -4406,11 +4430,11 @@ func TestEsdt_CheckRolesOnMetaESDT(t *testing.T) { args.Eei = eei e, _ := NewESDTSmartContract(args) - err := e.checkSpecialRolesAccordingToTokenType([][]byte{[]byte("random")}, &ESDTDataV2{TokenType: []byte(metaESDT)}) + err := e.checkSpecialRolesAccordingToTokenType([][]byte{[]byte("random")}, &ESDTDataV2{TokenType: []byte(core.MetaESDT)}) assert.Nil(t, err) enableEpochsHandler.AddActiveFlags(common.ManagedCryptoAPIsFlag) - err = e.checkSpecialRolesAccordingToTokenType([][]byte{[]byte("random")}, &ESDTDataV2{TokenType: []byte(metaESDT)}) + err = e.checkSpecialRolesAccordingToTokenType([][]byte{[]byte("random")}, &ESDTDataV2{TokenType: []byte(core.MetaESDT)}) assert.Equal(t, err, vm.ErrInvalidArgument) } @@ -4460,3 +4484,332 @@ func TestEsdt_SetNFTCreateRoleAfterStopNFTCreateShouldNotWork(t *testing.T) { output = e.Execute(vmInput) assert.Equal(t, vmcommon.Ok, output) } + +func TestEsdt_UpdateTokenType(t *testing.T) { + t.Parallel() + + args := createMockArgumentsForESDT() + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + eei := createDefaultEei() + args.Eei = eei + + owner := bytes.Repeat([]byte{1}, 32) + tokenName := []byte("TOKEN-ABABAB") + tokensMap := map[string][]byte{} + marshalizedData, _ := args.Marshalizer.Marshal(ESDTDataV2{ + TokenName: tokenName, + OwnerAddress: owner, + CanPause: true, + IsPaused: true, + TokenType: []byte(core.NonFungibleESDT), + CanAddSpecialRoles: true, + }) + tokensMap[string(tokenName)] = marshalizedData + eei.storageUpdate[string(eei.scAddress)] = tokensMap + + e, _ := NewESDTSmartContract(args) + + vmInput := getDefaultVmInputForFunc("setSpecialRole", [][]byte{tokenName, owner, []byte(core.ESDTRoleNFTCreate)}) + vmInput.CallerAddr = owner + output := e.Execute(vmInput) + assert.Equal(t, vmcommon.Ok, output) + + vmInput = getDefaultVmInputForFunc("stopNFTCreate", [][]byte{tokenName}) + vmInput.CallerAddr = owner + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.Ok, output) + + vmInput = getDefaultVmInputForFunc("setSpecialRole", [][]byte{tokenName, owner, []byte(core.ESDTRoleNFTCreate)}) + vmInput.CallerAddr = owner + enableEpochsHandler.AddActiveFlags(common.NFTStopCreateFlag) + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "cannot add NFT create role as NFT creation was stopped")) + + enableEpochsHandler.RemoveActiveFlags(common.NFTStopCreateFlag) + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.Ok, output) +} + +func TestEsdt_ExecuteChangeToMultiShardCreate(t *testing.T) { + t.Parallel() + + args := createMockArgumentsForESDT() + eei := createDefaultEei() + args.Eei = eei + e, _ := NewESDTSmartContract(args) + + vmInput := getDefaultVmInputForFunc("changeToMultiShardCreate", nil) + + eei.returnMessage = "" + eei.gasRemaining = 9999 + output := e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, eei.returnMessage, "invalid number of arguments") + + vmInput.Arguments = [][]byte{[]byte("tokenName")} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "no ticker with given name")) + + esdtData := &ESDTDataV2{TokenType: []byte(core.NonFungibleESDT), 
OwnerAddress: vmInput.CallerAddr} + _ = e.saveToken(vmInput.Arguments[0], esdtData) + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "cannot add special roles")) + + esdtData.CanAddSpecialRoles = true + esdtData.CanCreateMultiShard = true + _ = e.saveToken(vmInput.Arguments[0], esdtData) + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "it is already multi shard create")) + + esdtData.CanCreateMultiShard = false + _ = e.saveToken(vmInput.Arguments[0], esdtData) + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "element was not found")) + + esdtData.SpecialRoles = append(esdtData.SpecialRoles, &ESDTRoles{Address: vmInput.CallerAddr, Roles: [][]byte{[]byte(core.ESDTRoleNFTCreate)}}) + _ = e.saveToken(vmInput.Arguments[0], esdtData) + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.Ok, output) +} + +func TestEsdt_UpdateTokenID(t *testing.T) { + t.Parallel() + + args := createMockArgumentsForESDT() + eei := createDefaultEei() + args.Eei = eei + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + e, _ := NewESDTSmartContract(args) + + vmInput := getDefaultVmInputForFunc("updateTokenID", nil) + + enableEpochsHandler.RemoveActiveFlags(common.DynamicESDTFlag) + eei.returnMessage = "" + eei.gasRemaining = 9999 + output := e.Execute(vmInput) + assert.Equal(t, vmcommon.FunctionNotFound, output) + assert.Equal(t, eei.returnMessage, "invalid method to call") + + eei.returnMessage = "" + eei.gasRemaining = 9999 + enableEpochsHandler.AddActiveFlags(common.DynamicESDTFlag) + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.FunctionWrongSignature, output) + assert.Equal(t, eei.returnMessage, "invalid number of arguments, wanted 1") + + vmInput.Arguments = [][]byte{[]byte("tokenName")} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "no ticker with given name")) + + esdtData := &ESDTDataV2{TokenType: []byte(core.NonFungibleESDT), OwnerAddress: vmInput.CallerAddr} + _ = e.saveToken(vmInput.Arguments[0], esdtData) + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.Ok, output) + + esdtData, _ = e.getExistingToken(vmInput.Arguments[0]) + assert.Equal(t, esdtData.TokenType, []byte(core.NonFungibleESDTv2)) +} + +func TestEsdt_RegisterDynamic(t *testing.T) { + t.Parallel() + + args := createMockArgumentsForESDT() + eei := createDefaultEei() + args.Eei = eei + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + e, _ := NewESDTSmartContract(args) + + vmInput := getDefaultVmInputForFunc("registerDynamic", nil) + + enableEpochsHandler.RemoveActiveFlags(common.DynamicESDTFlag) + eei.returnMessage = "" + eei.gasRemaining = 9999 + output := e.Execute(vmInput) + assert.Equal(t, vmcommon.FunctionNotFound, output) + assert.Equal(t, eei.returnMessage, "invalid method to call") + + eei.returnMessage = "" + eei.gasRemaining = 9999 + enableEpochsHandler.AddActiveFlags(common.DynamicESDTFlag) + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, eei.returnMessage, "not enough arguments") + + 
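// Editorial sketch, not part of the patch: the assertions that follow outline the
// argument layout "registerDynamic" appears to expect: token name, ticker, token
// type, plus a decimals argument when the type is META. A hypothetical helper for
// building such an argument list (name illustrative only, assuming math/big is
// imported as in the rest of this test file) could look like:
buildRegisterDynamicArgs := func(name, ticker, tokenType string, numDecimals int64) [][]byte {
	callArgs := [][]byte{[]byte(name), []byte(ticker), []byte(tokenType)}
	if tokenType == "META" {
		// META tokens additionally carry the number of decimals
		callArgs = append(callArgs, big.NewInt(numDecimals).Bytes())
	}
	return callArgs
}
_ = buildRegisterDynamicArgs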
vmInput.Arguments = [][]byte{[]byte("tokenName")} + vmInput.CallValue = big.NewInt(0).Set(e.baseIssuingCost) + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "not enough arguments")) + + vmInput.Arguments = [][]byte{[]byte("tokenName"), []byte("ABABAB"), []byte("WRONGTYPE")} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "invalid argument")) + + vmInput.Arguments = [][]byte{[]byte("tokenName"), []byte("ABABAB"), []byte("META")} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "not enough arguments")) + + decimals := big.NewInt(20) + vmInput.Arguments = [][]byte{[]byte("tokenName"), []byte("ABABAB"), []byte("META"), decimals.Bytes()} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + + decimals = big.NewInt(10) + vmInput.Arguments = [][]byte{[]byte("tokenName"), []byte("ABABAB"), []byte("META"), decimals.Bytes(), []byte("wrongextra")} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + + vmInput.Arguments = [][]byte{[]byte("tokenName"), []byte("ABABAB"), []byte("META"), decimals.Bytes()} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.Ok, output) +} + +func TestEsdt_RegisterAndSetAllRolesDynamic(t *testing.T) { + t.Parallel() + + args := createMockArgumentsForESDT() + eei := createDefaultEei() + args.Eei = eei + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + e, _ := NewESDTSmartContract(args) + + vmInput := getDefaultVmInputForFunc("registerAndSetAllRolesDynamic", nil) + + enableEpochsHandler.RemoveActiveFlags(common.DynamicESDTFlag) + eei.returnMessage = "" + eei.gasRemaining = 9999 + output := e.Execute(vmInput) + assert.Equal(t, vmcommon.FunctionNotFound, output) + assert.Equal(t, eei.returnMessage, "invalid method to call") + + eei.returnMessage = "" + eei.gasRemaining = 9999 + enableEpochsHandler.AddActiveFlags(common.DynamicESDTFlag) + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, eei.returnMessage, "not enough arguments") + + vmInput.Arguments = [][]byte{[]byte("tokenName")} + vmInput.CallValue = big.NewInt(0).Set(e.baseIssuingCost) + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "not enough arguments")) + + vmInput.Arguments = [][]byte{[]byte("tokenName"), []byte("ABABAB"), []byte("WRONGTYPE")} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "invalid argument")) + + vmInput.Arguments = [][]byte{[]byte("tokenName"), []byte("ABABAB"), []byte("META")} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "not enough arguments")) + + decimals := big.NewInt(20) + vmInput.Arguments = [][]byte{[]byte("tokenName"), []byte("ABABAB"), []byte("META"), decimals.Bytes()} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + + decimals = big.NewInt(10) + vmInput.Arguments = 
[][]byte{[]byte("tokenName"), []byte("ABABAB"), []byte("META"), decimals.Bytes(), []byte("wrongextra")} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + + vmInput.Arguments = [][]byte{[]byte("tokenName"), []byte("ABABAB"), []byte("META"), decimals.Bytes()} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.Ok, output) +} + +func TestEsdt_ChangeToDynamic(t *testing.T) { + t.Parallel() + + args := createMockArgumentsForESDT() + eei := createDefaultEei() + args.Eei = eei + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + e, _ := NewESDTSmartContract(args) + + vmInput := getDefaultVmInputForFunc("changeToDynamic", nil) + + enableEpochsHandler.RemoveActiveFlags(common.DynamicESDTFlag) + eei.returnMessage = "" + eei.gasRemaining = 9999 + output := e.Execute(vmInput) + assert.Equal(t, vmcommon.FunctionNotFound, output) + assert.Equal(t, eei.returnMessage, "invalid method to call") + + eei.returnMessage = "" + eei.gasRemaining = 9999 + enableEpochsHandler.AddActiveFlags(common.DynamicESDTFlag) + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, eei.returnMessage, "not enough arguments") + + vmInput.Arguments = [][]byte{[]byte("tokenName")} + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "no ticker with given name")) + + esdtData := &ESDTDataV2{TokenType: []byte(core.FungibleESDT), OwnerAddress: vmInput.CallerAddr} + _ = e.saveToken(vmInput.Arguments[0], esdtData) + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "cannot change FungibleESDT tokens to dynamic")) + + esdtData.TokenType = []byte(core.DynamicMetaESDT) + _ = e.saveToken(vmInput.Arguments[0], esdtData) + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + assert.True(t, strings.Contains(eei.returnMessage, "tokenID is already dynamic")) + + esdtData.TokenType = []byte(core.MetaESDT) + esdtData.SpecialRoles = append(esdtData.SpecialRoles, &ESDTRoles{Address: vmInput.CallerAddr, Roles: [][]byte{[]byte(core.ESDTRoleNFTCreate), []byte(core.ESDTRoleNFTUpdateAttributes)}}) + esdtData.SpecialRoles = append(esdtData.SpecialRoles, &ESDTRoles{Address: bytes.Repeat([]byte{2}, 32), Roles: [][]byte{[]byte(core.ESDTRoleNFTUpdateAttributes)}}) + + _ = e.saveToken(vmInput.Arguments[0], esdtData) + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.UserError, output) + fmt.Println(eei.returnMessage) + assert.True(t, strings.Contains(eei.returnMessage, vm.ErrCannotChangeToDynamic.Error())) + + esdtData.SpecialRoles[1] = &ESDTRoles{Address: bytes.Repeat([]byte{2}, 32), Roles: [][]byte{[]byte(core.ESDTRoleNFTRecreate)}} + _ = e.saveToken(vmInput.Arguments[0], esdtData) + eei.returnMessage = "" + output = e.Execute(vmInput) + assert.Equal(t, vmcommon.Ok, output) + + esdtData, _ = e.getExistingToken(vmInput.Arguments[0]) + assert.True(t, strings.Contains(string(esdtData.TokenType), core.Dynamic)) +} diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go index e87190dd1de..8501f73abf4 100644 --- a/vm/systemSmartContracts/governance.go +++ b/vm/systemSmartContracts/governance.go @@ -714,11 +714,7 @@ func (g *governanceContract) closeProposal(args 
*vmcommon.ContractCallInput) vmc g.addToAccumulatedFees(baseConfig.LostProposalFee) } - err = g.eei.Transfer(generalProposal.IssuerAddress, args.RecipientAddr, tokensToReturn, nil, 0) - if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + g.eei.Transfer(generalProposal.IssuerAddress, args.RecipientAddr, tokensToReturn, nil, 0) logEntry := &vmcommon.LogEntry{ Identifier: []byte(args.Function), @@ -831,12 +827,7 @@ func (g *governanceContract) claimAccumulatedFees(args *vmcommon.ContractCallInp accumulatedFees := g.getAccumulatedFees() g.setAccumulatedFees(big.NewInt(0)) - err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, accumulatedFees, nil, 0) - if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - + g.eei.Transfer(args.CallerAddr, args.RecipientAddr, accumulatedFees, nil, 0) return vmcommon.Ok } diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go index 8ad11cbe510..168e3c7be60 100644 --- a/vm/systemSmartContracts/governance_test.go +++ b/vm/systemSmartContracts/governance_test.go @@ -405,6 +405,101 @@ func TestGovernanceContract_ChangeConfigOutOfGas(t *testing.T) { require.Equal(t, vmcommon.Ok, retCode) } +func TestGovernanceContract_ValidatorVoteInvalidDelegated(t *testing.T) { + t.Parallel() + + returnMessage := "" + errInvalidVoteSubstr := "invalid delegator address" + callerAddress := vm.FirstDelegationSCAddress + proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) + + args := createMockGovernanceArgs() + + generalProposal := &GeneralProposal{ + CommitHash: proposalIdentifier, + StartVoteEpoch: 10, + EndVoteEpoch: 15, + } + args.Eei = &mock.SystemEIStub{ + GetStorageCalled: func(key []byte) []byte { + if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { + proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) + return proposalBytes + } + + return nil + }, + BlockChainHookCalled: func() vm.BlockchainHook { + return &mock.BlockChainHookStub{ + CurrentNonceCalled: func() uint64 { + return 14 + }, + } + }, + AddReturnMessageCalled: func(msg string) { + returnMessage = msg + }, + } + voteArgs := [][]byte{ + proposalIdentifier, + []byte("yes"), + []byte("delegatedToWrongAddress"), + big.NewInt(1000).Bytes(), + } + + gsc, _ := NewGovernanceContract(args) + callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, returnMessage, errInvalidVoteSubstr) +} + +func TestGovernanceContract_DelegateVoteUserErrors(t *testing.T) { + t.Parallel() + + gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() + blockchainHook.CurrentNonceCalled = func() uint64 { + return 12 + } + + callerAddress := bytes.Repeat([]byte{2}, 32) + proposalIdentifier := []byte("aaaaaaaaa") + generalProposal := &GeneralProposal{ + CommitHash: proposalIdentifier, + StartVoteEpoch: 10, + EndVoteEpoch: 15, + Yes: big.NewInt(0), + No: big.NewInt(0), + Veto: big.NewInt(0), + Abstain: big.NewInt(0), + } + + voteArgs := [][]byte{ + []byte("1"), + []byte("yes"), + } + gsc.eei.SetStorage(voteArgs[0], proposalIdentifier) + _ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal) + + callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + 
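// Editorial sketch, not part of the patch: as the governance.go hunks above show,
// eei.Transfer no longer returns an error, and the matching stub callback gained a
// gas-limit parameter and lost its return value. A minimal wiring of the updated
// stub, assuming the mock.SystemEIStub shape used throughout these test files:
eiStub := &mock.SystemEIStub{
	TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) {
		// observe or assert on the transfer here; a failure can no longer be injected
		// by returning an error, only through the marshalizer or storage stubs
	},
}
_ = eiStub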
require.Equal(t, eei.GetReturnMessage(), "invalid number of arguments") + + callInput.Arguments = append(callInput.Arguments, []byte{1}, []byte{2}) + callInput.CallValue = big.NewInt(10) + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "function is not payable")) + + callInput.CallValue = big.NewInt(0) + callInput.GasProvided = 0 + gsc.gasCost.MetaChainSystemSCsCost.DelegateVote = 10 + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.OutOfGas, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough gas")) +} + func TestGovernanceContract_ChangeConfigWrongCaller(t *testing.T) { t.Parallel() @@ -1002,52 +1097,6 @@ func TestGovernanceContract_VoteAfterGovernanceFixesActivationWithOngoingListV1( require.Equal(t, vmcommon.Ok, retCode) } -func TestGovernanceContract_DelegateVoteUserErrors(t *testing.T) { - t.Parallel() - - gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() - blockchainHook.CurrentNonceCalled = func() uint64 { - return 12 - } - - callerAddress := bytes.Repeat([]byte{2}, 32) - proposalIdentifier := []byte("aaaaaaaaa") - generalProposal := &GeneralProposal{ - CommitHash: proposalIdentifier, - StartVoteEpoch: 10, - EndVoteEpoch: 15, - Yes: big.NewInt(0), - No: big.NewInt(0), - Veto: big.NewInt(0), - Abstain: big.NewInt(0), - } - - voteArgs := [][]byte{ - []byte("1"), - []byte("yes"), - } - gsc.eei.SetStorage(voteArgs[0], proposalIdentifier) - _ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal) - - callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.Equal(t, eei.GetReturnMessage(), "invalid number of arguments") - - callInput.Arguments = append(callInput.Arguments, []byte{1}, []byte{2}) - callInput.CallValue = big.NewInt(10) - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.True(t, strings.Contains(eei.GetReturnMessage(), "function is not payable")) - - callInput.CallValue = big.NewInt(0) - callInput.GasProvided = 0 - gsc.gasCost.MetaChainSystemSCsCost.DelegateVote = 10 - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.OutOfGas, retCode) - require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough gas")) -} - func TestGovernanceContract_DelegateVoteMoreErrors(t *testing.T) { t.Parallel() diff --git a/vm/systemSmartContracts/logs.go b/vm/systemSmartContracts/logs.go index 69af22820e1..c40834107f3 100644 --- a/vm/systemSmartContracts/logs.go +++ b/vm/systemSmartContracts/logs.go @@ -64,13 +64,10 @@ func (d *delegation) createAndAddLogEntryForDelegate( function == mergeValidatorDataToDelegation || function == changeOwner { address = contractCallInput.Arguments[0] - - topics = append(topics, contractCallInput.RecipientAddr) - } - if function == core.SCDeployInitFunctionName { - topics = append(topics, contractCallInput.RecipientAddr) } + topics = append(topics, contractCallInput.RecipientAddr) + entry := &vmcommon.LogEntry{ Identifier: []byte("delegate"), Address: address, diff --git a/vm/systemSmartContracts/logs_test.go b/vm/systemSmartContracts/logs_test.go index 5f88b1ddabd..4fc3f536878 100644 --- a/vm/systemSmartContracts/logs_test.go +++ b/vm/systemSmartContracts/logs_test.go @@ -37,6 +37,7 @@ func TestCreateLogEntryForDelegate(t *testing.T) { VMInput: vmcommon.VMInput{ CallerAddr: []byte("caller"), }, + 
RecipientAddr: []byte("recipient"), }, delegationValue, &GlobalFundData{ @@ -52,7 +53,7 @@ func TestCreateLogEntryForDelegate(t *testing.T) { require.Equal(t, &vmcommon.LogEntry{ Identifier: []byte("delegate"), Address: []byte("caller"), - Topics: [][]byte{delegationValue.Bytes(), big.NewInt(6000).Bytes(), big.NewInt(1).Bytes(), big.NewInt(1001000).Bytes()}, + Topics: [][]byte{delegationValue.Bytes(), big.NewInt(6000).Bytes(), big.NewInt(1).Bytes(), big.NewInt(1001000).Bytes(), []byte("recipient")}, }, res) } diff --git a/vm/systemSmartContracts/staking.go b/vm/systemSmartContracts/staking.go index 004254ce87b..7acfb492d15 100644 --- a/vm/systemSmartContracts/staking.go +++ b/vm/systemSmartContracts/staking.go @@ -24,8 +24,6 @@ var log = logger.GetOrCreate("vm/systemsmartcontracts") const ownerKey = "owner" const nodesConfigKey = "nodesConfig" -const waitingListHeadKey = "waitingList" -const waitingElementPrefix = "w_" type stakingSC struct { eei vm.SystemEI @@ -60,13 +58,6 @@ type ArgsNewStakingSmartContract struct { EnableEpochsHandler common.EnableEpochsHandler } -type waitingListReturnData struct { - blsKeys [][]byte - stakedDataList []*StakedDataV2_0 - lastKey []byte - afterLastjailed bool -} - // NewStakingSmartContract creates a staking smart contract func NewStakingSmartContract( args ArgsNewStakingSmartContract, @@ -218,6 +209,8 @@ func (s *stakingSC) Execute(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return s.fixWaitingListQueueSize(args) case "addMissingNodeToQueue": return s.addMissingNodeToQueue(args) + case "unStakeAllNodesFromQueue": + return s.unStakeAllNodesFromQueue(args) } return vmcommon.UserError @@ -243,6 +236,10 @@ func (s *stakingSC) numSpareNodes() int64 { } func (s *stakingSC) canStake() bool { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return true + } + stakeConfig := s.getConfig() return stakeConfig.StakedNodes < stakeConfig.MaxNumNodes } @@ -503,44 +500,6 @@ func (s *stakingSC) stake(args *vmcommon.ContractCallInput, onlyRegister bool) v return vmcommon.Ok } -func (s *stakingSC) processStake(blsKey []byte, registrationData *StakedDataV2_0, addFirst bool) error { - if registrationData.Staked { - return nil - } - - registrationData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() - if !s.canStake() { - s.eei.AddReturnMessage(fmt.Sprintf("staking is full key put into waiting list %s", hex.EncodeToString(blsKey))) - err := s.addToWaitingList(blsKey, addFirst) - if err != nil { - s.eei.AddReturnMessage("error while adding to waiting") - return err - } - registrationData.Waiting = true - s.eei.Finish([]byte{waiting}) - return nil - } - - err := s.removeFromWaitingList(blsKey) - if err != nil { - s.eei.AddReturnMessage("error while removing from waiting") - return err - } - s.addToStakedNodes(1) - s.activeStakingFor(registrationData) - - return nil -} - -func (s *stakingSC) activeStakingFor(stakingData *StakedDataV2_0) { - stakingData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() - stakingData.Staked = true - stakingData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() - stakingData.UnStakedEpoch = common.DefaultUnstakedEpoch - stakingData.UnStakedNonce = 0 - stakingData.Waiting = false -} - func (s *stakingSC) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { // backward compatibility - no need for return message @@ -573,6 +532,7 @@ func (s *stakingSC) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcomm if 
registrationData.Staked { s.removeFromStakedNodes() } + if registrationData.Waiting { err = s.removeFromWaitingList(args.Arguments[0]) if err != nil { @@ -595,76 +555,111 @@ func (s *stakingSC) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcomm return vmcommon.Ok } +func (s *stakingSC) activeStakingFor(stakingData *StakedDataV2_0) { + stakingData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + stakingData.Staked = true + stakingData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() + stakingData.UnStakedEpoch = common.DefaultUnstakedEpoch + stakingData.UnStakedNonce = 0 + stakingData.Waiting = false +} + +func (s *stakingSC) processStake(blsKey []byte, registrationData *StakedDataV2_0, addFirst bool) error { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return s.processStakeV2(registrationData) + } + + return s.processStakeV1(blsKey, registrationData, addFirst) +} + +func (s *stakingSC) processStakeV2(registrationData *StakedDataV2_0) error { + if registrationData.Staked { + return nil + } + + registrationData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + s.addToStakedNodes(1) + s.activeStakingFor(registrationData) + + return nil +} + func (s *stakingSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return s.unStakeV2(args) + } + + return s.unStakeV1(args) +} + +func (s *stakingSC) unStakeV2(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + registrationData, retCode := s.checkUnStakeArgs(args) + if retCode != vmcommon.Ok { + return retCode + } + + if !registrationData.Staked { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.ExecutionFailed + } + + return s.tryUnStake(args.Arguments[0], registrationData) +} + +func (s *stakingSC) checkUnStakeArgs(args *vmcommon.ContractCallInput) (*StakedDataV2_0, vmcommon.ReturnCode) { if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { s.eei.AddReturnMessage("unStake function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError + return nil, vmcommon.UserError } if len(args.Arguments) < 2 { s.eei.AddReturnMessage("not enough arguments, needed BLS key and reward address") - return vmcommon.UserError + return nil, vmcommon.UserError } registrationData, err := s.getOrCreateRegisteredData(args.Arguments[0]) if err != nil { s.eei.AddReturnMessage("cannot get or create registered data: error " + err.Error()) - return vmcommon.UserError + return nil, vmcommon.UserError } if len(registrationData.RewardAddress) == 0 { s.eei.AddReturnMessage("cannot unStake a key that is not registered") - return vmcommon.UserError + return nil, vmcommon.UserError } if !bytes.Equal(args.Arguments[1], registrationData.RewardAddress) { s.eei.AddReturnMessage("unStake possible only from staker caller") - return vmcommon.UserError + return nil, vmcommon.UserError } if s.isNodeJailedOrWithBadRating(registrationData, args.Arguments[0]) { s.eei.AddReturnMessage("cannot unStake node which is jailed or with bad rating") - return vmcommon.UserError + return nil, vmcommon.UserError } if !registrationData.Staked && !registrationData.Waiting { s.eei.AddReturnMessage("cannot unStake node which was already unStaked") - return vmcommon.UserError - } - - if !registrationData.Staked { - registrationData.Waiting = false - err = s.removeFromWaitingList(args.Arguments[0]) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - err = 
s.saveStakingData(args.Arguments[0], registrationData) - if err != nil { - s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok + return nil, vmcommon.UserError } - addOneFromQueue := !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || s.canStakeIfOneRemoved() - if addOneFromQueue { - _, err = s.moveFirstFromWaitingToStaked() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - } + return registrationData, vmcommon.Ok +} +func (s *stakingSC) tryUnStake(key []byte, registrationData *StakedDataV2_0) vmcommon.ReturnCode { if !s.canUnStake() { s.eei.AddReturnMessage("unStake is not possible as too many left") return vmcommon.UserError } s.removeFromStakedNodes() + + return s.doUnStake(key, registrationData) +} + +func (s *stakingSC) doUnStake(key []byte, registrationData *StakedDataV2_0) vmcommon.ReturnCode { registrationData.Staked = false registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() registrationData.Waiting = false - err = s.saveStakingData(args.Arguments[0], registrationData) + err := s.saveStakingData(key, registrationData) if err != nil { s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) return vmcommon.UserError @@ -673,53 +668,6 @@ func (s *stakingSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return vmcommon.Ok } -func (s *stakingSC) moveFirstFromWaitingToStakedIfNeeded(blsKey []byte) (bool, error) { - waitingElementKey := createWaitingListKey(blsKey) - _, err := s.getWaitingListElement(waitingElementKey) - if err == nil { - // node in waiting - remove from it - and that's it - return false, s.removeFromWaitingList(blsKey) - } - - return s.moveFirstFromWaitingToStaked() -} - -func (s *stakingSC) moveFirstFromWaitingToStaked() (bool, error) { - waitingList, err := s.getWaitingListHead() - if err != nil { - return false, err - } - if waitingList.Length == 0 { - return false, nil - } - elementInList, err := s.getWaitingListElement(waitingList.FirstKey) - if err != nil { - return false, err - } - err = s.removeFromWaitingList(elementInList.BLSPublicKey) - if err != nil { - return false, err - } - - nodeData, err := s.getOrCreateRegisteredData(elementInList.BLSPublicKey) - if err != nil { - return false, err - } - if len(nodeData.RewardAddress) == 0 || nodeData.Staked { - return false, vm.ErrInvalidWaitingList - } - - nodeData.Waiting = false - nodeData.Staked = true - nodeData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() - nodeData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() - nodeData.UnStakedNonce = 0 - nodeData.UnStakedEpoch = common.DefaultUnstakedEpoch - - s.addToStakedNodes(1) - return true, s.saveStakingData(elementInList.BLSPublicKey, nodeData) -} - func (s *stakingSC) unBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { s.eei.AddReturnMessage("unBond function not allowed to be called by address " + string(args.CallerAddr)) @@ -809,751 +757,159 @@ func (s *stakingSC) isStaked(args *vmcommon.ContractCallInput) vmcommon.ReturnCo return vmcommon.UserError } -func (s *stakingSC) addToWaitingList(blsKey []byte, addJailed bool) error { - inWaitingListKey := createWaitingListKey(blsKey) - marshaledData := s.eei.GetStorage(inWaitingListKey) - if len(marshaledData) != 0 { - return nil - } - - waitingList, err := s.getWaitingListHead() - if err != 
nil { - return err +func (s *stakingSC) tryRemoveJailedNodeFromStaked(registrationData *StakedDataV2_0) { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { + s.removeAndSetUnstaked(registrationData) + return } - waitingList.Length += 1 - if waitingList.Length == 1 { - return s.startWaitingList(waitingList, addJailed, blsKey) + if s.canUnStake() { + s.removeAndSetUnstaked(registrationData) + return } - if addJailed { - return s.insertAfterLastJailed(waitingList, blsKey) - } + s.eei.AddReturnMessage("did not switch as not enough validators remaining") +} - return s.addToEndOfTheList(waitingList, blsKey) +func (s *stakingSC) removeAndSetUnstaked(registrationData *StakedDataV2_0) { + s.removeFromStakedNodes() + registrationData.Staked = false + registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() + registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() + registrationData.StakedNonce = math.MaxUint64 } -func (s *stakingSC) startWaitingList( - waitingList *WaitingList, - addJailed bool, - blsKey []byte, -) error { - inWaitingListKey := createWaitingListKey(blsKey) - waitingList.FirstKey = inWaitingListKey - waitingList.LastKey = inWaitingListKey - if addJailed { - waitingList.LastJailedKey = inWaitingListKey +func (s *stakingSC) updateConfigMinNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("updateConfigMinNodes function not allowed to be called by address " + string(args.CallerAddr)) + return vmcommon.UserError } - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: waitingList.LastKey, - NextKey: make([]byte, 0), + stakeConfig := s.getConfig() + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be 1") + return vmcommon.UserError } - return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) -} -func (s *stakingSC) addToEndOfTheList(waitingList *WaitingList, blsKey []byte) error { - inWaitingListKey := createWaitingListKey(blsKey) - oldLastKey := make([]byte, len(waitingList.LastKey)) - copy(oldLastKey, waitingList.LastKey) - - lastElement, err := s.getWaitingListElement(waitingList.LastKey) - if err != nil { - return err - } - lastElement.NextKey = inWaitingListKey - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: oldLastKey, - NextKey: make([]byte, 0), + newMinNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() + if newMinNodes <= 0 { + s.eei.AddReturnMessage("new minimum number of nodes zero or negative") + return vmcommon.UserError } - err = s.saveWaitingListElement(oldLastKey, lastElement) - if err != nil { - return err + if newMinNodes > int64(s.maxNumNodes) { + s.eei.AddReturnMessage("new minimum number of nodes greater than maximum number of nodes") + return vmcommon.UserError } - waitingList.LastKey = inWaitingListKey - return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) -} - -func (s *stakingSC) insertAfterLastJailed( - waitingList *WaitingList, - blsKey []byte, -) error { - inWaitingListKey := createWaitingListKey(blsKey) - if len(waitingList.LastJailedKey) == 0 { - previousFirstKey := make([]byte, len(waitingList.FirstKey)) - copy(previousFirstKey, waitingList.FirstKey) - waitingList.FirstKey = inWaitingListKey - waitingList.LastJailedKey = inWaitingListKey - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: inWaitingListKey, - NextKey: previousFirstKey, - } 
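// Editorial summary, not part of the patch: restated in one place, the flag-gated
// dispatch added above means the waiting-list machinery deleted in these hunks is
// bypassed entirely once staking v4 starts. This is a simplified restatement of the
// new flow, not the literal implementation:
//
//   canStake()     always returns true once common.StakingV4StartedFlag is enabled
//   processStake   routes to processStakeV2 (stake directly, no queue) when the flag
//                  is enabled, otherwise to the legacy processStakeV1 queueing path
//   unStake        routes to unStakeV2 when the flag is enabled, which rejects keys
//                  that are not actually staked with vm.ErrWaitingListDisabled,
//                  otherwise to the legacy unStakeV1 path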
+ stakeConfig.MinNumNodes = newMinNodes + s.setConfig(stakeConfig) - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && len(previousFirstKey) > 0 { - previousFirstElement, err := s.getWaitingListElement(previousFirstKey) - if err != nil { - return err - } - previousFirstElement.PreviousKey = inWaitingListKey - err = s.saveWaitingListElement(previousFirstKey, previousFirstElement) - if err != nil { - return err - } - } + return vmcommon.Ok +} - return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) +func (s *stakingSC) updateConfigMaxNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError } - - lastJailedElement, err := s.getWaitingListElement(waitingList.LastJailedKey) - if err != nil { - return err + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("updateConfigMaxNodes function not allowed to be called by address " + string(args.CallerAddr)) + return vmcommon.UserError } - if bytes.Equal(waitingList.LastKey, waitingList.LastJailedKey) { - waitingList.LastJailedKey = inWaitingListKey - return s.addToEndOfTheList(waitingList, blsKey) + stakeConfig := s.getConfig() + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be 1") + return vmcommon.UserError } - firstNonJailedElement, err := s.getWaitingListElement(lastJailedElement.NextKey) - if err != nil { - return err + newMaxNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() + if newMaxNodes <= 0 { + s.eei.AddReturnMessage("new max number of nodes zero or negative") + return vmcommon.UserError } - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: make([]byte, len(inWaitingListKey)), - NextKey: make([]byte, len(inWaitingListKey)), + if newMaxNodes < int64(s.minNumNodes) { + s.eei.AddReturnMessage("new max number of nodes less than min number of nodes") + return vmcommon.UserError } - copy(elementInWaiting.PreviousKey, waitingList.LastJailedKey) - copy(elementInWaiting.NextKey, lastJailedElement.NextKey) - lastJailedElement.NextKey = inWaitingListKey - firstNonJailedElement.PreviousKey = inWaitingListKey - waitingList.LastJailedKey = inWaitingListKey + prevMaxNumNodes := big.NewInt(stakeConfig.MaxNumNodes) + s.eei.Finish(prevMaxNumNodes.Bytes()) + stakeConfig.MaxNumNodes = newMaxNodes + s.setConfig(stakeConfig) - err = s.saveWaitingListElement(elementInWaiting.PreviousKey, lastJailedElement) - if err != nil { - return err - } - err = s.saveWaitingListElement(elementInWaiting.NextKey, firstNonJailedElement) - if err != nil { - return err - } - err = s.saveWaitingListElement(inWaitingListKey, elementInWaiting) - if err != nil { - return err - } - return s.saveWaitingListHead(waitingList) + return vmcommon.Ok } -func (s *stakingSC) saveElementAndList(key []byte, element *ElementInList, waitingList *WaitingList) error { - err := s.saveWaitingListElement(key, element) - if err != nil { - return err - } - - return s.saveWaitingListHead(waitingList) +func (s *stakingSC) isNodeJailedOrWithBadRating(registrationData *StakedDataV2_0, blsKey []byte) bool { + return registrationData.Jailed || s.eei.CanUnJail(blsKey) || s.eei.IsBadRating(blsKey) } -func (s *stakingSC) removeFromWaitingList(blsKey []byte) error { - inWaitingListKey := createWaitingListKey(blsKey) - marshaledData := s.eei.GetStorage(inWaitingListKey) - if len(marshaledData) == 0 { - return nil +func (s 
*stakingSC) getRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError } - s.eei.SetStorage(inWaitingListKey, nil) - elementToRemove := &ElementInList{} - err := s.marshalizer.Unmarshal(elementToRemove, marshaledData) - if err != nil { - return err + stakedData, returnCode := s.getStakedDataIfExists(args) + if returnCode != vmcommon.Ok { + return returnCode } - waitingList, err := s.getWaitingListHead() + s.eei.Finish([]byte(hex.EncodeToString(stakedData.RewardAddress))) + return vmcommon.Ok +} + +func (s *stakingSC) getStakedDataIfExists(args *vmcommon.ContractCallInput) (*StakedDataV2_0, vmcommon.ReturnCode) { + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) if err != nil { - return err + s.eei.AddReturnMessage("insufficient gas") + return nil, vmcommon.OutOfGas } - if waitingList.Length == 0 { - return vm.ErrInvalidWaitingList + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be equal to 1") + return nil, vmcommon.UserError } - waitingList.Length -= 1 - if waitingList.Length == 0 { - s.eei.SetStorage([]byte(waitingListHeadKey), nil) - return nil + stakedData, err := s.getOrCreateRegisteredData(args.Arguments[0]) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return nil, vmcommon.UserError + } + if len(stakedData.RewardAddress) == 0 { + s.eei.AddReturnMessage("blsKey not registered in staking sc") + return nil, vmcommon.UserError } - // remove the first element - isCorrectFirstQueueFlagEnabled := s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) - isFirstElementBeforeFix := !isCorrectFirstQueueFlagEnabled && bytes.Equal(elementToRemove.PreviousKey, inWaitingListKey) - isFirstElementAfterFix := isCorrectFirstQueueFlagEnabled && bytes.Equal(waitingList.FirstKey, inWaitingListKey) - if isFirstElementBeforeFix || isFirstElementAfterFix { - if bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { - waitingList.LastJailedKey = make([]byte, 0) - } - - nextElement, errGet := s.getWaitingListElement(elementToRemove.NextKey) - if errGet != nil { - return errGet - } + return stakedData, vmcommon.Ok +} - nextElement.PreviousKey = elementToRemove.NextKey - waitingList.FirstKey = elementToRemove.NextKey - return s.saveElementAndList(elementToRemove.NextKey, nextElement, waitingList) +func (s *stakingSC) getBLSKeyStatus(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError } - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { - waitingList.LastJailedKey = make([]byte, len(elementToRemove.PreviousKey)) - copy(waitingList.LastJailedKey, elementToRemove.PreviousKey) - } - - previousElement, _ := s.getWaitingListElement(elementToRemove.PreviousKey) - // search the other way around for the element in front - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && previousElement == nil { - previousElement, err = s.searchPreviousFromHead(waitingList, inWaitingListKey, elementToRemove) - if err != nil { - return err - } - } - if previousElement == nil { - previousElement, err = s.getWaitingListElement(elementToRemove.PreviousKey) - if err != nil { - return err - } - } - if len(elementToRemove.NextKey) == 0 { - waitingList.LastKey = elementToRemove.PreviousKey - 
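// Editorial note, not part of the patch: for reference while reading the deletions,
// the queue being removed here was kept as a doubly linked list in contract storage,
// reconstructed from the removed code roughly as:
//
//   "waitingList"   -> marshalled WaitingList{FirstKey, LastKey, Length, LastJailedKey}
//   "w_" + blsKey   -> marshalled ElementInList{BLSPublicKey, PreviousKey, NextKey}
//
// so removing an element meant unlinking it, rewriting its neighbours and, when it
// was the first or last entry, updating the list head as well.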
previousElement.NextKey = make([]byte, 0) - return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) - } - - nextElement, err := s.getWaitingListElement(elementToRemove.NextKey) - if err != nil { - return err - } - - nextElement.PreviousKey = elementToRemove.PreviousKey - previousElement.NextKey = elementToRemove.NextKey - - err = s.saveWaitingListElement(elementToRemove.NextKey, nextElement) - if err != nil { - return err - } - return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) -} - -func (s *stakingSC) searchPreviousFromHead(waitingList *WaitingList, inWaitingListKey []byte, elementToRemove *ElementInList) (*ElementInList, error) { - var previousElement *ElementInList - index := uint32(1) - nextKey := make([]byte, len(waitingList.FirstKey)) - copy(nextKey, waitingList.FirstKey) - for len(nextKey) != 0 && index <= waitingList.Length { - element, errGet := s.getWaitingListElement(nextKey) - if errGet != nil { - return nil, errGet - } - - if bytes.Equal(inWaitingListKey, element.NextKey) { - previousElement = element - elementToRemove.PreviousKey = createWaitingListKey(previousElement.BLSPublicKey) - return previousElement, nil - } - - nextKey = make([]byte, len(element.NextKey)) - if len(element.NextKey) == 0 { - break - } - index++ - copy(nextKey, element.NextKey) - } - return nil, vm.ErrElementNotFound -} - -func (s *stakingSC) getWaitingListElement(key []byte) (*ElementInList, error) { - marshaledData := s.eei.GetStorage(key) - if len(marshaledData) == 0 { - return nil, vm.ErrElementNotFound - } - - element := &ElementInList{} - err := s.marshalizer.Unmarshal(element, marshaledData) - if err != nil { - return nil, err - } - - return element, nil -} - -func (s *stakingSC) saveWaitingListElement(key []byte, element *ElementInList) error { - marshaledData, err := s.marshalizer.Marshal(element) - if err != nil { - return err - } - - s.eei.SetStorage(key, marshaledData) - return nil -} - -func (s *stakingSC) getWaitingListHead() (*WaitingList, error) { - waitingList := &WaitingList{ - FirstKey: make([]byte, 0), - LastKey: make([]byte, 0), - Length: 0, - LastJailedKey: make([]byte, 0), - } - marshaledData := s.eei.GetStorage([]byte(waitingListHeadKey)) - if len(marshaledData) == 0 { - return waitingList, nil - } - - err := s.marshalizer.Unmarshal(waitingList, marshaledData) - if err != nil { - return nil, err - } - - return waitingList, nil -} - -func (s *stakingSC) saveWaitingListHead(waitingList *WaitingList) error { - marshaledData, err := s.marshalizer.Marshal(waitingList) - if err != nil { - return err - } - - s.eei.SetStorage([]byte(waitingListHeadKey), marshaledData) - return nil -} - -func createWaitingListKey(blsKey []byte) []byte { - return []byte(waitingElementPrefix + string(blsKey)) -} - -func (s *stakingSC) switchJailedWithWaiting(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("switchJailedWithWaiting function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - return vmcommon.UserError - } - - blsKey := args.Arguments[0] - registrationData, err := s.getOrCreateRegisteredData(blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - if len(registrationData.RewardAddress) == 0 { - s.eei.AddReturnMessage("no need to jail as not a validator") - return vmcommon.UserError - } - if !registrationData.Staked { - 
s.eei.AddReturnMessage("no need to jail as not a validator") - return vmcommon.UserError - } - if registrationData.Jailed { - s.eei.AddReturnMessage(vm.ErrBLSPublicKeyAlreadyJailed.Error()) - return vmcommon.UserError - } - switched, err := s.moveFirstFromWaitingToStakedIfNeeded(blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - registrationData.NumJailed++ - registrationData.Jailed = true - registrationData.JailedNonce = s.eei.BlockChainHook().CurrentNonce() - - if !switched && !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { - s.eei.AddReturnMessage("did not switch as nobody in waiting, but jailed") - } else { - s.tryRemoveJailedNodeFromStaked(registrationData) - } - - err = s.saveStakingData(blsKey, registrationData) - if err != nil { - s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok -} - -func (s *stakingSC) tryRemoveJailedNodeFromStaked(registrationData *StakedDataV2_0) { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { - s.removeAndSetUnstaked(registrationData) - return - } - - if s.canUnStake() { - s.removeAndSetUnstaked(registrationData) - return - } - - s.eei.AddReturnMessage("did not switch as not enough validators remaining") -} - -func (s *stakingSC) removeAndSetUnstaked(registrationData *StakedDataV2_0) { - s.removeFromStakedNodes() - registrationData.Staked = false - registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() - registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() - registrationData.StakedNonce = math.MaxUint64 -} - -func (s *stakingSC) updateConfigMinNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("updateConfigMinNodes function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - - stakeConfig := s.getConfig() - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be 1") - return vmcommon.UserError - } - - newMinNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() - if newMinNodes <= 0 { - s.eei.AddReturnMessage("new minimum number of nodes zero or negative") - return vmcommon.UserError - } - - if newMinNodes > int64(s.maxNumNodes) { - s.eei.AddReturnMessage("new minimum number of nodes greater than maximum number of nodes") - return vmcommon.UserError - } - - stakeConfig.MinNumNodes = newMinNodes - s.setConfig(stakeConfig) - - return vmcommon.Ok -} - -func (s *stakingSC) updateConfigMaxNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("updateConfigMaxNodes function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - - stakeConfig := s.getConfig() - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be 1") - return vmcommon.UserError - } - - newMaxNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() - if newMaxNodes <= 0 { - s.eei.AddReturnMessage("new max number of nodes zero or negative") - return vmcommon.UserError - } - - if newMaxNodes < int64(s.minNumNodes) { - s.eei.AddReturnMessage("new max number of nodes less than 
min number of nodes") - return vmcommon.UserError - } - - prevMaxNumNodes := big.NewInt(stakeConfig.MaxNumNodes) - s.eei.Finish(prevMaxNumNodes.Bytes()) - stakeConfig.MaxNumNodes = newMaxNodes - s.setConfig(stakeConfig) - - return vmcommon.Ok -} - -func (s *stakingSC) isNodeJailedOrWithBadRating(registrationData *StakedDataV2_0, blsKey []byte) bool { - return registrationData.Jailed || s.eei.CanUnJail(blsKey) || s.eei.IsBadRating(blsKey) -} - -func (s *stakingSC) getWaitingListIndex(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { - s.eei.AddReturnMessage("this is only a view function") - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be equal to 1") - return vmcommon.UserError - } - - waitingElementKey := createWaitingListKey(args.Arguments[0]) - _, err := s.getWaitingListElement(waitingElementKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - if bytes.Equal(waitingElementKey, waitingListHead.FirstKey) { - s.eei.Finish([]byte(strconv.Itoa(1))) - return vmcommon.Ok - } - if bytes.Equal(waitingElementKey, waitingListHead.LastKey) { - s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) - return vmcommon.Ok - } - - prevElement, err := s.getWaitingListElement(waitingListHead.FirstKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - index := uint32(2) - nextKey := make([]byte, len(waitingElementKey)) - copy(nextKey, prevElement.NextKey) - for len(nextKey) != 0 && index <= waitingListHead.Length { - if bytes.Equal(nextKey, waitingElementKey) { - s.eei.Finish([]byte(strconv.Itoa(int(index)))) - return vmcommon.Ok - } - - prevElement, err = s.getWaitingListElement(nextKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - if len(prevElement.NextKey) == 0 { - break - } - index++ - copy(nextKey, prevElement.NextKey) - } - - s.eei.AddReturnMessage("element in waiting list not found") - return vmcommon.UserError -} - -func (s *stakingSC) getWaitingListSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return vmcommon.OutOfGas - } - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) - return vmcommon.Ok -} - -func (s *stakingSC) getRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - stakedData, returnCode := s.getStakedDataIfExists(args) - if returnCode != vmcommon.Ok { - return returnCode - } - - s.eei.Finish([]byte(hex.EncodeToString(stakedData.RewardAddress))) - return vmcommon.Ok -} - -func (s *stakingSC) getStakedDataIfExists(args *vmcommon.ContractCallInput) (*StakedDataV2_0, vmcommon.ReturnCode) { - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return nil, 
vmcommon.OutOfGas - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be equal to 1") - return nil, vmcommon.UserError - } - stakedData, err := s.getOrCreateRegisteredData(args.Arguments[0]) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return nil, vmcommon.UserError - } - if len(stakedData.RewardAddress) == 0 { - s.eei.AddReturnMessage("blsKey not registered in staking sc") - return nil, vmcommon.UserError - } - - return stakedData, vmcommon.Ok -} - -func (s *stakingSC) getBLSKeyStatus(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - stakedData, returnCode := s.getStakedDataIfExists(args) - if returnCode != vmcommon.Ok { - return returnCode + stakedData, returnCode := s.getStakedDataIfExists(args) + if returnCode != vmcommon.Ok { + return returnCode } if stakedData.Jailed || s.eei.CanUnJail(args.Arguments[0]) { s.eei.Finish([]byte("jailed")) return vmcommon.Ok } - if stakedData.Waiting { - s.eei.Finish([]byte("queued")) - return vmcommon.Ok - } - if stakedData.Staked { - s.eei.Finish([]byte("staked")) - return vmcommon.Ok - } - - s.eei.Finish([]byte("unStaked")) - return vmcommon.Ok -} - -func (s *stakingSC) getRemainingUnbondPeriod(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - stakedData, returnCode := s.getStakedDataIfExists(args) - if returnCode != vmcommon.Ok { - return returnCode - } - if stakedData.UnStakedNonce == 0 { - s.eei.AddReturnMessage("not in unbond period") - return vmcommon.UserError - } - - currentNonce := s.eei.BlockChainHook().CurrentNonce() - passedNonce := currentNonce - stakedData.UnStakedNonce - if passedNonce >= s.unBondPeriod { - if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.Finish(zero.Bytes()) - } else { - s.eei.Finish([]byte("0")) - } - } else { - remaining := s.unBondPeriod - passedNonce - if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.Finish(big.NewInt(0).SetUint64(remaining).Bytes()) - } else { - s.eei.Finish([]byte(strconv.Itoa(int(remaining)))) - } - } - - return vmcommon.Ok -} - -func (s *stakingSC) getWaitingListRegisterNonceAndRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { - s.eei.AddReturnMessage("this is only a view function") - return vmcommon.UserError - } - if len(args.Arguments) != 0 { - s.eei.AddReturnMessage("number of arguments must be equal to 0") - return vmcommon.UserError - } - - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - if len(waitingListData.stakedDataList) == 0 { - s.eei.AddReturnMessage("no one in waitingList") - return vmcommon.UserError - } - - for index, stakedData := range waitingListData.stakedDataList { - s.eei.Finish(waitingListData.blsKeys[index]) - s.eei.Finish(stakedData.RewardAddress) - s.eei.Finish(big.NewInt(int64(stakedData.RegisterNonce)).Bytes()) - } - - return vmcommon.Ok -} - -func (s *stakingSC) setOwnersOnAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, 
s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("setOwnersOnAddresses function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - if len(args.Arguments)%2 != 0 { - s.eei.AddReturnMessage("invalid number of arguments: expected an even number of arguments") - return vmcommon.UserError - } - for i := 0; i < len(args.Arguments); i += 2 { - stakedData, err := s.getOrCreateRegisteredData(args.Arguments[i]) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) - return vmcommon.UserError - } - if len(stakedData.RewardAddress) == 0 { - log.Error("staking data does not exists", - "bls key", hex.EncodeToString(args.Arguments[i]), - "owner as hex", hex.EncodeToString(args.Arguments[i+1])) - continue - } - - stakedData.OwnerAddress = args.Arguments[i+1] - err = s.saveStakingData(args.Arguments[i], stakedData) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) - return vmcommon.UserError - } - } - - return vmcommon.Ok -} - -func (s *stakingSC) getOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { - s.eei.AddReturnMessage("this is only a view function") - return vmcommon.UserError - } - if len(args.Arguments) < 1 { - s.eei.AddReturnMessage(fmt.Sprintf("invalid number of arguments: expected min %d, got %d", 1, len(args.Arguments))) - return vmcommon.UserError - } - - stakedData, errGet := s.getOrCreateRegisteredData(args.Arguments[0]) - if errGet != nil { - s.eei.AddReturnMessage(errGet.Error()) - return vmcommon.UserError + if stakedData.Waiting { + s.eei.Finish([]byte("queued")) + return vmcommon.Ok } - if len(stakedData.OwnerAddress) == 0 { - s.eei.AddReturnMessage("owner address is nil") - return vmcommon.UserError + if stakedData.Staked { + s.eei.Finish([]byte("staked")) + return vmcommon.Ok } - s.eei.Finish(stakedData.OwnerAddress) + s.eei.Finish([]byte("unStaked")) return vmcommon.Ok } @@ -1567,212 +923,117 @@ func (s *stakingSC) getTotalNumberOfRegisteredNodes(args *vmcommon.ContractCallI return vmcommon.UserError } - stakeConfig := s.getConfig() waitingListHead, err := s.getWaitingListHead() if err != nil { s.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } + stakeConfig := s.getConfig() totalRegistered := stakeConfig.StakedNodes + stakeConfig.JailedNodes + int64(waitingListHead.Length) s.eei.Finish(big.NewInt(totalRegistered).Bytes()) return vmcommon.Ok } -func (s *stakingSC) resetLastUnJailedFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - // backward compatibility - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") - return vmcommon.UserError - } - if len(args.Arguments) != 0 { - s.eei.AddReturnMessage("number of arguments must be equal to 0") - return vmcommon.UserError - } - - waitingList, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) +func (s *stakingSC) getRemainingUnbondPeriod(args 
*vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) return vmcommon.UserError } - if len(waitingList.LastJailedKey) == 0 { - return vmcommon.Ok + stakedData, returnCode := s.getStakedDataIfExists(args) + if returnCode != vmcommon.Ok { + return returnCode } - - waitingList.LastJailedKey = make([]byte, 0) - err = s.saveWaitingListHead(waitingList) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + if stakedData.UnStakedNonce == 0 { + s.eei.AddReturnMessage("not in unbond period") return vmcommon.UserError } - return vmcommon.Ok -} - -func (s *stakingSC) cleanAdditionalQueueNotEnoughFunds( - waitingListData *waitingListReturnData, -) ([]string, map[string][][]byte, error) { - - listOfOwners := make([]string, 0) - mapOwnersUnStakedNodes := make(map[string][][]byte) - mapCheckedOwners := make(map[string]*validatorFundInfo) - for i := len(waitingListData.blsKeys) - 1; i >= 0; i-- { - stakedData := waitingListData.stakedDataList[i] - validatorInfo, err := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, s.stakeValue) - if err != nil { - return nil, nil, err - } - if validatorInfo.numNodesToUnstake == 0 { - continue - } - - validatorInfo.numNodesToUnstake-- - blsKey := waitingListData.blsKeys[i] - err = s.removeFromWaitingList(blsKey) - if err != nil { - return nil, nil, err - } - - registrationData, err := s.getOrCreateRegisteredData(blsKey) - if err != nil { - return nil, nil, err - } - - registrationData.Staked = false - registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() - registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() - registrationData.Waiting = false - - err = s.saveStakingData(blsKey, registrationData) - if err != nil { - return nil, nil, err + currentNonce := s.eei.BlockChainHook().CurrentNonce() + passedNonce := currentNonce - stakedData.UnStakedNonce + if passedNonce >= s.unBondPeriod { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.Finish(zero.Bytes()) + } else { + s.eei.Finish([]byte("0")) } - - _, alreadyAdded := mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] - if !alreadyAdded { - listOfOwners = append(listOfOwners, string(stakedData.OwnerAddress)) + } else { + remaining := s.unBondPeriod - passedNonce + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.Finish(big.NewInt(0).SetUint64(remaining).Bytes()) + } else { + s.eei.Finish([]byte(strconv.Itoa(int(remaining)))) } - - mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] = append(mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)], blsKey) } - return listOfOwners, mapOwnersUnStakedNodes, nil + return vmcommon.Ok } -func (s *stakingSC) stakeNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { +func (s *stakingSC) setOwnersOnAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { s.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be equal to 1") + s.eei.AddReturnMessage("setOwnersOnAddresses function not allowed to be called by address " + string(args.CallerAddr)) return vmcommon.UserError } - - numNodesToStake := 
big.NewInt(0).SetBytes(args.Arguments[0]).Uint64() - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + if len(args.Arguments)%2 != 0 { + s.eei.AddReturnMessage("invalid number of arguments: expected an even number of arguments") return vmcommon.UserError } - if len(waitingListData.blsKeys) == 0 { - s.eei.AddReturnMessage("no nodes in queue") - return vmcommon.Ok - } - - nodePriceToUse := big.NewInt(0).Set(s.minNodePrice) - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - nodePriceToUse.Set(s.stakeValue) - } - - stakedNodes := uint64(0) - mapCheckedOwners := make(map[string]*validatorFundInfo) - for i, blsKey := range waitingListData.blsKeys { - stakedData := waitingListData.stakedDataList[i] - if stakedNodes >= numNodesToStake { - break - } - - validatorInfo, errCheck := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, nodePriceToUse) - if errCheck != nil { - s.eei.AddReturnMessage(errCheck.Error()) - return vmcommon.UserError - } - if validatorInfo.numNodesToUnstake > 0 { - continue - } - - s.activeStakingFor(stakedData) - err = s.saveStakingData(blsKey, stakedData) + for i := 0; i < len(args.Arguments); i += 2 { + stakedData, err := s.getOrCreateRegisteredData(args.Arguments[i]) if err != nil { s.eei.AddReturnMessage(err.Error()) + s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) return vmcommon.UserError } + if len(stakedData.RewardAddress) == 0 { + log.Error("staking data does not exists", + "bls key", hex.EncodeToString(args.Arguments[i]), + "owner as hex", hex.EncodeToString(args.Arguments[i+1])) + continue + } - // remove from waiting list - err = s.removeFromWaitingList(blsKey) + stakedData.OwnerAddress = args.Arguments[i+1] + err = s.saveStakingData(args.Arguments[i], stakedData) if err != nil { s.eei.AddReturnMessage(err.Error()) + s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) return vmcommon.UserError } - - stakedNodes++ - // return the change key - s.eei.Finish(blsKey) - s.eei.Finish(stakedData.RewardAddress) } - s.addToStakedNodes(int64(stakedNodes)) - return vmcommon.Ok } -func (s *stakingSC) cleanAdditionalQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { +func (s *stakingSC) getOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { s.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { + s.eei.AddReturnMessage("this is only a view function") return vmcommon.UserError } - if len(args.Arguments) != 0 { - s.eei.AddReturnMessage("number of arguments must be 0") + if len(args.Arguments) < 1 { + s.eei.AddReturnMessage(fmt.Sprintf("invalid number of arguments: expected min %d, got %d", 1, len(args.Arguments))) return vmcommon.UserError } - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + stakedData, errGet := s.getOrCreateRegisteredData(args.Arguments[0]) + if errGet != nil { + s.eei.AddReturnMessage(errGet.Error()) return 
vmcommon.UserError } - if len(waitingListData.blsKeys) == 0 { - s.eei.AddReturnMessage("no nodes in queue") - return vmcommon.Ok - } - - listOfOwners, mapOwnersAndBLSKeys, err := s.cleanAdditionalQueueNotEnoughFunds(waitingListData) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + if len(stakedData.OwnerAddress) == 0 { + s.eei.AddReturnMessage("owner address is nil") return vmcommon.UserError } - for _, owner := range listOfOwners { - s.eei.Finish([]byte(owner)) - blsKeys := mapOwnersAndBLSKeys[owner] - for _, blsKey := range blsKeys { - s.eei.Finish(blsKey) - } - } - + s.eei.Finish(stakedData.OwnerAddress) return vmcommon.Ok } @@ -1894,193 +1155,6 @@ func (s *stakingSC) checkValidatorFunds( return validatorInfo, nil } -func (s *stakingSC) getFirstElementsFromWaitingList(numNodes uint32) (*waitingListReturnData, error) { - waitingListData := &waitingListReturnData{} - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - return nil, err - } - if waitingListHead.Length == 0 { - return waitingListData, nil - } - - blsKeysToStake := make([][]byte, 0) - stakedDataList := make([]*StakedDataV2_0, 0) - index := uint32(1) - nextKey := make([]byte, len(waitingListHead.FirstKey)) - copy(nextKey, waitingListHead.FirstKey) - for len(nextKey) != 0 && index <= waitingListHead.Length && index <= numNodes { - element, errGet := s.getWaitingListElement(nextKey) - if errGet != nil { - return nil, errGet - } - - if bytes.Equal(nextKey, waitingListHead.LastJailedKey) { - waitingListData.afterLastjailed = true - } - - stakedData, errGet := s.getOrCreateRegisteredData(element.BLSPublicKey) - if errGet != nil { - return nil, errGet - } - - blsKeysToStake = append(blsKeysToStake, element.BLSPublicKey) - stakedDataList = append(stakedDataList, stakedData) - - if len(element.NextKey) == 0 { - break - } - index++ - copy(nextKey, element.NextKey) - } - - if numNodes >= waitingListHead.Length && len(blsKeysToStake) != int(waitingListHead.Length) { - log.Warn("mismatch length on waiting list elements in stakingSC.getFirstElementsFromWaitingList") - } - - waitingListData.blsKeys = blsKeysToStake - waitingListData.stakedDataList = stakedDataList - waitingListData.lastKey = nextKey - return waitingListData, nil -} - -func (s *stakingSC) fixWaitingListQueueSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return vmcommon.OutOfGas - } - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - if waitingListHead.Length <= 1 { - return vmcommon.Ok - } - - foundLastJailedKey := len(waitingListHead.LastJailedKey) == 0 - - index := uint32(1) - nextKey := make([]byte, len(waitingListHead.FirstKey)) - copy(nextKey, waitingListHead.FirstKey) - for len(nextKey) != 0 && index <= waitingListHead.Length { - element, errGet := s.getWaitingListElement(nextKey) - if errGet != nil { - s.eei.AddReturnMessage(errGet.Error()) - return vmcommon.UserError - } - - if bytes.Equal(waitingListHead.LastJailedKey, nextKey) { - foundLastJailedKey = true - } - - _, errGet = 
s.getOrCreateRegisteredData(element.BLSPublicKey) - if errGet != nil { - s.eei.AddReturnMessage(errGet.Error()) - return vmcommon.UserError - } - - if len(element.NextKey) == 0 { - break - } - index++ - copy(nextKey, element.NextKey) - } - - waitingListHead.Length = index - waitingListHead.LastKey = nextKey - if !foundLastJailedKey { - waitingListHead.LastJailedKey = make([]byte, 0) - } - - err = s.saveWaitingListHead(waitingListHead) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok -} - -func (s *stakingSC) addMissingNodeToQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return vmcommon.OutOfGas - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("invalid number of arguments") - return vmcommon.UserError - } - - blsKey := args.Arguments[0] - _, err = s.getWaitingListElement(createWaitingListKey(blsKey)) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - for _, keyInList := range waitingListData.blsKeys { - if bytes.Equal(keyInList, blsKey) { - s.eei.AddReturnMessage("key is in queue, not missing") - return vmcommon.UserError - } - } - - waitingList, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - waitingList.Length += 1 - if waitingList.Length == 1 { - err = s.startWaitingList(waitingList, false, blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok - } - - err = s.addToEndOfTheList(waitingList, blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok -} - // CanUseContract returns true if contract can be used func (s *stakingSC) CanUseContract() bool { return true diff --git a/vm/systemSmartContracts/stakingWaitingList.go b/vm/systemSmartContracts/stakingWaitingList.go new file mode 100644 index 00000000000..e08b16b3cde --- /dev/null +++ b/vm/systemSmartContracts/stakingWaitingList.go @@ -0,0 +1,1127 @@ +package systemSmartContracts + +import ( + "bytes" + "encoding/hex" + "fmt" + "math" + "math/big" + "strconv" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +const waitingListHeadKey = "waitingList" +const waitingElementPrefix = "w_" + +type waitingListReturnData struct { + blsKeys [][]byte + stakedDataList []*StakedDataV2_0 + lastKey []byte + afterLastJailed bool +} + +func (s *stakingSC) processStakeV1(blsKey []byte, registrationData *StakedDataV2_0, addFirst bool) error { + if registrationData.Staked { + return nil + } + + registrationData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + if !s.canStake() { + s.eei.AddReturnMessage(fmt.Sprintf("staking is full key 
put into waiting list %s", hex.EncodeToString(blsKey))) + err := s.addToWaitingList(blsKey, addFirst) + if err != nil { + s.eei.AddReturnMessage("error while adding to waiting") + return err + } + registrationData.Waiting = true + s.eei.Finish([]byte{waiting}) + return nil + } + + err := s.removeFromWaitingList(blsKey) + if err != nil { + s.eei.AddReturnMessage("error while removing from waiting") + return err + } + + s.addToStakedNodes(1) + s.activeStakingFor(registrationData) + + return nil +} + +func (s *stakingSC) unStakeV1(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + registrationData, retCode := s.checkUnStakeArgs(args) + if retCode != vmcommon.Ok { + return retCode + } + + var err error + if !registrationData.Staked { + registrationData.Waiting = false + err = s.removeFromWaitingList(args.Arguments[0]) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + err = s.saveStakingData(args.Arguments[0], registrationData) + if err != nil { + s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok + } + + addOneFromQueue := !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || s.canStakeIfOneRemoved() + if addOneFromQueue { + _, err = s.moveFirstFromWaitingToStaked() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + } + + return s.tryUnStake(args.Arguments[0], registrationData) +} + +func (s *stakingSC) moveFirstFromWaitingToStakedIfNeeded(blsKey []byte) (bool, error) { + waitingElementKey := createWaitingListKey(blsKey) + _, err := s.getWaitingListElement(waitingElementKey) + if err == nil { + // node in waiting - remove from it - and that's it + return false, s.removeFromWaitingList(blsKey) + } + + return s.moveFirstFromWaitingToStaked() +} + +func (s *stakingSC) moveFirstFromWaitingToStaked() (bool, error) { + waitingList, err := s.getWaitingListHead() + if err != nil { + return false, err + } + if waitingList.Length == 0 { + return false, nil + } + elementInList, err := s.getWaitingListElement(waitingList.FirstKey) + if err != nil { + return false, err + } + err = s.removeFromWaitingList(elementInList.BLSPublicKey) + if err != nil { + return false, err + } + + nodeData, err := s.getOrCreateRegisteredData(elementInList.BLSPublicKey) + if err != nil { + return false, err + } + if len(nodeData.RewardAddress) == 0 || nodeData.Staked { + return false, vm.ErrInvalidWaitingList + } + + nodeData.Waiting = false + nodeData.Staked = true + nodeData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + nodeData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() + nodeData.UnStakedNonce = 0 + nodeData.UnStakedEpoch = common.DefaultUnstakedEpoch + + s.addToStakedNodes(1) + return true, s.saveStakingData(elementInList.BLSPublicKey, nodeData) +} + +func (s *stakingSC) addToWaitingList(blsKey []byte, addJailed bool) error { + inWaitingListKey := createWaitingListKey(blsKey) + marshaledData := s.eei.GetStorage(inWaitingListKey) + if len(marshaledData) != 0 { + return nil + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + return err + } + + waitingList.Length += 1 + if waitingList.Length == 1 { + return s.startWaitingList(waitingList, addJailed, blsKey) + } + + if addJailed { + return s.insertAfterLastJailed(waitingList, blsKey) + } + + return s.addToEndOfTheList(waitingList, blsKey) +} + +func (s *stakingSC) startWaitingList( + waitingList *WaitingList, + addJailed bool, + blsKey []byte, +) error { + 
inWaitingListKey := createWaitingListKey(blsKey) + waitingList.FirstKey = inWaitingListKey + waitingList.LastKey = inWaitingListKey + if addJailed { + waitingList.LastJailedKey = inWaitingListKey + } + + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: waitingList.LastKey, + NextKey: make([]byte, 0), + } + return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) +} + +func (s *stakingSC) addToEndOfTheList(waitingList *WaitingList, blsKey []byte) error { + inWaitingListKey := createWaitingListKey(blsKey) + oldLastKey := make([]byte, len(waitingList.LastKey)) + copy(oldLastKey, waitingList.LastKey) + + lastElement, err := s.getWaitingListElement(waitingList.LastKey) + if err != nil { + return err + } + lastElement.NextKey = inWaitingListKey + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: oldLastKey, + NextKey: make([]byte, 0), + } + + err = s.saveWaitingListElement(oldLastKey, lastElement) + if err != nil { + return err + } + + waitingList.LastKey = inWaitingListKey + return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) +} + +func (s *stakingSC) insertAfterLastJailed( + waitingList *WaitingList, + blsKey []byte, +) error { + inWaitingListKey := createWaitingListKey(blsKey) + if len(waitingList.LastJailedKey) == 0 { + previousFirstKey := make([]byte, len(waitingList.FirstKey)) + copy(previousFirstKey, waitingList.FirstKey) + waitingList.FirstKey = inWaitingListKey + waitingList.LastJailedKey = inWaitingListKey + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: inWaitingListKey, + NextKey: previousFirstKey, + } + + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && len(previousFirstKey) > 0 { + previousFirstElement, err := s.getWaitingListElement(previousFirstKey) + if err != nil { + return err + } + previousFirstElement.PreviousKey = inWaitingListKey + err = s.saveWaitingListElement(previousFirstKey, previousFirstElement) + if err != nil { + return err + } + } + + return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) + } + + lastJailedElement, err := s.getWaitingListElement(waitingList.LastJailedKey) + if err != nil { + return err + } + + if bytes.Equal(waitingList.LastKey, waitingList.LastJailedKey) { + waitingList.LastJailedKey = inWaitingListKey + return s.addToEndOfTheList(waitingList, blsKey) + } + + firstNonJailedElement, err := s.getWaitingListElement(lastJailedElement.NextKey) + if err != nil { + return err + } + + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: make([]byte, len(inWaitingListKey)), + NextKey: make([]byte, len(inWaitingListKey)), + } + copy(elementInWaiting.PreviousKey, waitingList.LastJailedKey) + copy(elementInWaiting.NextKey, lastJailedElement.NextKey) + + lastJailedElement.NextKey = inWaitingListKey + firstNonJailedElement.PreviousKey = inWaitingListKey + waitingList.LastJailedKey = inWaitingListKey + + err = s.saveWaitingListElement(elementInWaiting.PreviousKey, lastJailedElement) + if err != nil { + return err + } + err = s.saveWaitingListElement(elementInWaiting.NextKey, firstNonJailedElement) + if err != nil { + return err + } + err = s.saveWaitingListElement(inWaitingListKey, elementInWaiting) + if err != nil { + return err + } + return s.saveWaitingListHead(waitingList) +} + +func (s *stakingSC) saveElementAndList(key []byte, element *ElementInList, waitingList *WaitingList) error { + err := s.saveWaitingListElement(key, element) + if err != nil { + return err + } + 
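+	// element persisted above; write the updated list head (FirstKey/LastKey/Length) as the final step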
+ return s.saveWaitingListHead(waitingList) +} + +func (s *stakingSC) removeFromWaitingList(blsKey []byte) error { + inWaitingListKey := createWaitingListKey(blsKey) + marshaledData := s.eei.GetStorage(inWaitingListKey) + if len(marshaledData) == 0 { + return nil + } + s.eei.SetStorage(inWaitingListKey, nil) + + elementToRemove := &ElementInList{} + err := s.marshalizer.Unmarshal(elementToRemove, marshaledData) + if err != nil { + return err + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + return err + } + if waitingList.Length == 0 { + return vm.ErrInvalidWaitingList + } + waitingList.Length -= 1 + if waitingList.Length == 0 { + s.eei.SetStorage([]byte(waitingListHeadKey), nil) + return nil + } + + // remove the first element + isFirstElementBeforeFix := !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && bytes.Equal(elementToRemove.PreviousKey, inWaitingListKey) + isFirstElementAfterFix := s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && bytes.Equal(waitingList.FirstKey, inWaitingListKey) + if isFirstElementBeforeFix || isFirstElementAfterFix { + if bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { + waitingList.LastJailedKey = make([]byte, 0) + } + + nextElement, errGet := s.getWaitingListElement(elementToRemove.NextKey) + if errGet != nil { + return errGet + } + + nextElement.PreviousKey = elementToRemove.NextKey + waitingList.FirstKey = elementToRemove.NextKey + return s.saveElementAndList(elementToRemove.NextKey, nextElement, waitingList) + } + + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { + waitingList.LastJailedKey = make([]byte, len(elementToRemove.PreviousKey)) + copy(waitingList.LastJailedKey, elementToRemove.PreviousKey) + } + + previousElement, _ := s.getWaitingListElement(elementToRemove.PreviousKey) + // search the other way around for the element in front + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && previousElement == nil { + previousElement, err = s.searchPreviousFromHead(waitingList, inWaitingListKey, elementToRemove) + if err != nil { + return err + } + } + if previousElement == nil { + previousElement, err = s.getWaitingListElement(elementToRemove.PreviousKey) + if err != nil { + return err + } + } + if len(elementToRemove.NextKey) == 0 { + waitingList.LastKey = elementToRemove.PreviousKey + previousElement.NextKey = make([]byte, 0) + return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) + } + + nextElement, err := s.getWaitingListElement(elementToRemove.NextKey) + if err != nil { + return err + } + + nextElement.PreviousKey = elementToRemove.PreviousKey + previousElement.NextKey = elementToRemove.NextKey + + err = s.saveWaitingListElement(elementToRemove.NextKey, nextElement) + if err != nil { + return err + } + return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) +} + +func (s *stakingSC) searchPreviousFromHead(waitingList *WaitingList, inWaitingListKey []byte, elementToRemove *ElementInList) (*ElementInList, error) { + var previousElement *ElementInList + index := uint32(1) + nextKey := make([]byte, len(waitingList.FirstKey)) + copy(nextKey, waitingList.FirstKey) + for len(nextKey) != 0 && index <= waitingList.Length { + element, errGet := s.getWaitingListElement(nextKey) + if errGet != nil { + return nil, errGet + } + + if bytes.Equal(inWaitingListKey, element.NextKey) { + previousElement = element + 
elementToRemove.PreviousKey = createWaitingListKey(previousElement.BLSPublicKey) + return previousElement, nil + } + + nextKey = make([]byte, len(element.NextKey)) + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + return nil, vm.ErrElementNotFound +} + +func (s *stakingSC) getWaitingListElement(key []byte) (*ElementInList, error) { + marshaledData := s.eei.GetStorage(key) + if len(marshaledData) == 0 { + return nil, vm.ErrElementNotFound + } + + element := &ElementInList{} + err := s.marshalizer.Unmarshal(element, marshaledData) + if err != nil { + return nil, err + } + + return element, nil +} + +func (s *stakingSC) saveWaitingListElement(key []byte, element *ElementInList) error { + marshaledData, err := s.marshalizer.Marshal(element) + if err != nil { + return err + } + + s.eei.SetStorage(key, marshaledData) + return nil +} + +func (s *stakingSC) getWaitingListHead() (*WaitingList, error) { + waitingList := &WaitingList{ + FirstKey: make([]byte, 0), + LastKey: make([]byte, 0), + Length: 0, + LastJailedKey: make([]byte, 0), + } + marshaledData := s.eei.GetStorage([]byte(waitingListHeadKey)) + if len(marshaledData) == 0 { + return waitingList, nil + } + + err := s.marshalizer.Unmarshal(waitingList, marshaledData) + if err != nil { + return nil, err + } + + return waitingList, nil +} + +func (s *stakingSC) saveWaitingListHead(waitingList *WaitingList) error { + marshaledData, err := s.marshalizer.Marshal(waitingList) + if err != nil { + return err + } + + s.eei.SetStorage([]byte(waitingListHeadKey), marshaledData) + return nil +} + +func createWaitingListKey(blsKey []byte) []byte { + return []byte(waitingElementPrefix + string(blsKey)) +} + +func (s *stakingSC) switchJailedWithWaiting(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("switchJailedWithWaiting function not allowed to be called by address " + string(args.CallerAddr)) + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + return vmcommon.UserError + } + + blsKey := args.Arguments[0] + registrationData, err := s.getOrCreateRegisteredData(blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(registrationData.RewardAddress) == 0 { + s.eei.AddReturnMessage("no need to jail as not a validator") + return vmcommon.UserError + } + if !registrationData.Staked { + s.eei.AddReturnMessage("no need to jail as not a validator") + return vmcommon.UserError + } + if registrationData.Jailed { + s.eei.AddReturnMessage(vm.ErrBLSPublicKeyAlreadyJailed.Error()) + return vmcommon.UserError + } + switched, err := s.moveFirstFromWaitingToStakedIfNeeded(blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + registrationData.NumJailed++ + registrationData.Jailed = true + registrationData.JailedNonce = s.eei.BlockChainHook().CurrentNonce() + + if !switched && !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { + s.eei.AddReturnMessage("did not switch as nobody in waiting, but jailed") + } else { + s.tryRemoveJailedNodeFromStaked(registrationData) + } + + err = s.saveStakingData(blsKey, registrationData) + if err != nil { + s.eei.AddReturnMessage("cannot 
save staking data: error " + err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (s *stakingSC) getWaitingListIndex(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { + s.eei.AddReturnMessage("this is only a view function") + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be equal to 1") + return vmcommon.UserError + } + + waitingElementKey := createWaitingListKey(args.Arguments[0]) + _, err := s.getWaitingListElement(waitingElementKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if bytes.Equal(waitingElementKey, waitingListHead.FirstKey) { + s.eei.Finish([]byte(strconv.Itoa(1))) + return vmcommon.Ok + } + if bytes.Equal(waitingElementKey, waitingListHead.LastKey) { + s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) + return vmcommon.Ok + } + + prevElement, err := s.getWaitingListElement(waitingListHead.FirstKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + index := uint32(2) + nextKey := make([]byte, len(waitingElementKey)) + copy(nextKey, prevElement.NextKey) + for len(nextKey) != 0 && index <= waitingListHead.Length { + if bytes.Equal(nextKey, waitingElementKey) { + s.eei.Finish([]byte(strconv.Itoa(int(index)))) + return vmcommon.Ok + } + + prevElement, err = s.getWaitingListElement(nextKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if len(prevElement.NextKey) == 0 { + break + } + index++ + copy(nextKey, prevElement.NextKey) + } + + s.eei.AddReturnMessage("element in waiting list not found") + return vmcommon.UserError +} + +func (s *stakingSC) getWaitingListSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError + } + + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) + if err != nil { + s.eei.AddReturnMessage("insufficient gas") + return vmcommon.OutOfGas + } + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) + return vmcommon.Ok +} + +func (s *stakingSC) getWaitingListRegisterNonceAndRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { + s.eei.AddReturnMessage("this is only a view function") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be equal to 0") + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.stakedDataList) == 0 { + s.eei.AddReturnMessage("no one in waitingList") + return vmcommon.UserError + } + + for index, 
stakedData := range waitingListData.stakedDataList { + s.eei.Finish(waitingListData.blsKeys[index]) + s.eei.Finish(stakedData.RewardAddress) + s.eei.Finish(big.NewInt(int64(stakedData.RegisterNonce)).Bytes()) + } + + return vmcommon.Ok +} + +func (s *stakingSC) resetLastUnJailedFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + // backward compatibility + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be equal to 0") + return vmcommon.UserError + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if len(waitingList.LastJailedKey) == 0 { + return vmcommon.Ok + } + + waitingList.LastJailedKey = make([]byte, 0) + err = s.saveWaitingListHead(waitingList) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (s *stakingSC) cleanAdditionalQueueNotEnoughFunds( + waitingListData *waitingListReturnData, +) ([]string, map[string][][]byte, error) { + + listOfOwners := make([]string, 0) + mapOwnersUnStakedNodes := make(map[string][][]byte) + mapCheckedOwners := make(map[string]*validatorFundInfo) + for i := len(waitingListData.blsKeys) - 1; i >= 0; i-- { + stakedData := waitingListData.stakedDataList[i] + validatorInfo, err := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, s.stakeValue) + if err != nil { + return nil, nil, err + } + if validatorInfo.numNodesToUnstake == 0 { + continue + } + + validatorInfo.numNodesToUnstake-- + blsKey := waitingListData.blsKeys[i] + err = s.removeFromWaitingList(blsKey) + if err != nil { + return nil, nil, err + } + + registrationData, err := s.getOrCreateRegisteredData(blsKey) + if err != nil { + return nil, nil, err + } + + registrationData.Staked = false + registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() + registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() + registrationData.Waiting = false + + err = s.saveStakingData(blsKey, registrationData) + if err != nil { + return nil, nil, err + } + + _, alreadyAdded := mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] + if !alreadyAdded { + listOfOwners = append(listOfOwners, string(stakedData.OwnerAddress)) + } + + mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] = append(mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)], blsKey) + } + + return listOfOwners, mapOwnersUnStakedNodes, nil +} + +func (s *stakingSC) stakeNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, 
s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be equal to 1") + return vmcommon.UserError + } + + numNodesToStake := big.NewInt(0).SetBytes(args.Arguments[0]).Uint64() + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.blsKeys) == 0 { + s.eei.AddReturnMessage("no nodes in queue") + return vmcommon.Ok + } + + nodePriceToUse := big.NewInt(0).Set(s.minNodePrice) + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + nodePriceToUse.Set(s.stakeValue) + } + + stakedNodes := uint64(0) + mapCheckedOwners := make(map[string]*validatorFundInfo) + for i, blsKey := range waitingListData.blsKeys { + stakedData := waitingListData.stakedDataList[i] + if stakedNodes >= numNodesToStake { + break + } + + validatorInfo, errCheck := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, nodePriceToUse) + if errCheck != nil { + s.eei.AddReturnMessage(errCheck.Error()) + return vmcommon.UserError + } + if validatorInfo.numNodesToUnstake > 0 { + continue + } + + s.activeStakingFor(stakedData) + err = s.saveStakingData(blsKey, stakedData) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + // remove from waiting list + err = s.removeFromWaitingList(blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + stakedNodes++ + // return the change key + s.eei.Finish(blsKey) + s.eei.Finish(stakedData.RewardAddress) + } + + s.addToStakedNodes(int64(stakedNodes)) + + return vmcommon.Ok +} + +func (s *stakingSC) unStakeAllNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be equal to 0") + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.blsKeys) == 0 { + return vmcommon.Ok + } + + orderedListOwners := make([]string, 0) + mapOwnerKeys := make(map[string][][]byte) + for i, blsKey := range waitingListData.blsKeys { + registrationData := waitingListData.stakedDataList[i] + result := s.doUnStake(blsKey, registrationData) + if result != vmcommon.Ok { + return result + } + + // delete element from waiting list + inWaitingListKey := createWaitingListKey(blsKey) + s.eei.SetStorage(inWaitingListKey, nil) + + ownerAddr := string(registrationData.OwnerAddress) + _, exists := mapOwnerKeys[ownerAddr] + if !exists { + mapOwnerKeys[ownerAddr] = make([][]byte, 0) + orderedListOwners = append(orderedListOwners, ownerAddr) + } + + mapOwnerKeys[ownerAddr] = append(mapOwnerKeys[ownerAddr], blsKey) + } + + // delete waiting list head element + s.eei.SetStorage([]byte(waitingListHeadKey), nil) + + // call unStakeAtEndOfEpoch from the delegation contracts to compute the new unStaked list 
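+	// owners were collected above in queue order; each metachain-shard owner is called
+	// once with all of its BLS keys, while owners from other shards are skipped below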
+ for _, owner := range orderedListOwners { + listOfKeys := mapOwnerKeys[owner] + + if s.eei.BlockChainHook().GetShardOfAddress([]byte(owner)) != core.MetachainShardId { + continue + } + + unStakeCall := "unStakeAtEndOfEpoch" + for _, key := range listOfKeys { + unStakeCall += "@" + hex.EncodeToString(key) + } + returnCode := s.executeOnStakeAtEndOfEpoch([]byte(owner), listOfKeys, args.RecipientAddr) + if returnCode != vmcommon.Ok { + return returnCode + } + } + + return vmcommon.Ok +} + +func (s *stakingSC) executeOnStakeAtEndOfEpoch(destinationAddress []byte, listOfKeys [][]byte, senderAddress []byte) vmcommon.ReturnCode { + unStakeCall := "unStakeAtEndOfEpoch" + for _, key := range listOfKeys { + unStakeCall += "@" + hex.EncodeToString(key) + } + vmOutput, err := s.eei.ExecuteOnDestContext(destinationAddress, senderAddress, big.NewInt(0), []byte(unStakeCall)) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmOutput.ReturnCode +} + +func (s *stakingSC) cleanAdditionalQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be 0") + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.blsKeys) == 0 { + s.eei.AddReturnMessage("no nodes in queue") + return vmcommon.Ok + } + + listOfOwners, mapOwnersAndBLSKeys, err := s.cleanAdditionalQueueNotEnoughFunds(waitingListData) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + for _, owner := range listOfOwners { + s.eei.Finish([]byte(owner)) + blsKeys := mapOwnersAndBLSKeys[owner] + for _, blsKey := range blsKeys { + s.eei.Finish(blsKey) + } + } + + return vmcommon.Ok +} + +func (s *stakingSC) getFirstElementsFromWaitingList(numNodes uint32) (*waitingListReturnData, error) { + waitingListData := &waitingListReturnData{} + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + return nil, err + } + if waitingListHead.Length == 0 { + return waitingListData, nil + } + + blsKeysToStake := make([][]byte, 0) + stakedDataList := make([]*StakedDataV2_0, 0) + index := uint32(1) + nextKey := make([]byte, len(waitingListHead.FirstKey)) + copy(nextKey, waitingListHead.FirstKey) + for len(nextKey) != 0 && index <= waitingListHead.Length && index <= numNodes { + element, errGet := s.getWaitingListElement(nextKey) + if errGet != nil { + return nil, errGet + } + + if bytes.Equal(nextKey, waitingListHead.LastJailedKey) { + waitingListData.afterLastJailed = true + } + + stakedData, errGet := s.getOrCreateRegisteredData(element.BLSPublicKey) + if errGet != nil { + return nil, errGet + } + + blsKeysToStake = append(blsKeysToStake, element.BLSPublicKey) + stakedDataList = append(stakedDataList, stakedData) + + if len(element.NextKey) 
== 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + + if numNodes >= waitingListHead.Length && len(blsKeysToStake) != int(waitingListHead.Length) { + log.Warn("mismatch length on waiting list elements in stakingSC.getFirstElementsFromWaitingList") + } + + waitingListData.blsKeys = blsKeysToStake + waitingListData.stakedDataList = stakedDataList + waitingListData.lastKey = nextKey + return waitingListData, nil +} + +func (s *stakingSC) fixWaitingListQueueSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError + } + + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) + if err != nil { + s.eei.AddReturnMessage("insufficient gas") + return vmcommon.OutOfGas + } + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if waitingListHead.Length <= 1 { + return vmcommon.Ok + } + + foundLastJailedKey := len(waitingListHead.LastJailedKey) == 0 + + index := uint32(1) + nextKey := make([]byte, len(waitingListHead.FirstKey)) + copy(nextKey, waitingListHead.FirstKey) + for len(nextKey) != 0 && index <= waitingListHead.Length { + element, errGet := s.getWaitingListElement(nextKey) + if errGet != nil { + s.eei.AddReturnMessage(errGet.Error()) + return vmcommon.UserError + } + + if bytes.Equal(waitingListHead.LastJailedKey, nextKey) { + foundLastJailedKey = true + } + + _, errGet = s.getOrCreateRegisteredData(element.BLSPublicKey) + if errGet != nil { + s.eei.AddReturnMessage(errGet.Error()) + return vmcommon.UserError + } + + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + + waitingListHead.Length = index + waitingListHead.LastKey = nextKey + if !foundLastJailedKey { + waitingListHead.LastJailedKey = make([]byte, 0) + } + + err = s.saveWaitingListHead(waitingListHead) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (s *stakingSC) addMissingNodeToQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError + } + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) + if err != nil { + s.eei.AddReturnMessage("insufficient gas") + return vmcommon.OutOfGas + } + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("invalid number of arguments") + return vmcommon.UserError + } + + blsKey := args.Arguments[0] + _, err = s.getWaitingListElement(createWaitingListKey(blsKey)) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + 
s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + for _, keyInList := range waitingListData.blsKeys { + if bytes.Equal(keyInList, blsKey) { + s.eei.AddReturnMessage("key is in queue, not missing") + return vmcommon.UserError + } + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + waitingList.Length += 1 + if waitingList.Length == 1 { + err = s.startWaitingList(waitingList, false, blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok + } + + err = s.addToEndOfTheList(waitingList, blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} diff --git a/vm/systemSmartContracts/staking_test.go b/vm/systemSmartContracts/staking_test.go index 7f46a417db5..fb92a574945 100644 --- a/vm/systemSmartContracts/staking_test.go +++ b/vm/systemSmartContracts/staking_test.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -53,6 +54,8 @@ func createMockStakingScArgumentsWithSystemScAddresses( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 1.0, + NodeLimitPercentage: 1.0, }, EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub( common.StakeFlag, @@ -95,6 +98,18 @@ func CreateVmContractCallInput() *vmcommon.ContractCallInput { } } +func createArgsVMContext() VMContextArgs { + return VMContextArgs{ + BlockChainHook: &mock.BlockChainHookStub{}, + CryptoHook: hooks.NewVMCryptoHook(), + InputParser: &mock.ArgumentParserMock{}, + ValidatorAccountsDB: &stateMock.AccountsStub{}, + ChanceComputer: &mock.RaterMock{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + UserAccountsDB: &stateMock.AccountsStub{}, + } +} + func TestNewStakingSmartContract_NilSystemEIShouldErr(t *testing.T) { t.Parallel() @@ -998,6 +1013,93 @@ func TestStakingSc_ExecuteIsStaked(t *testing.T) { checkIsStaked(t, stakingSmartContract, callerAddress, stakerPubKey, vmcommon.UserError) } +func TestStakingSc_StakeWithStakingV4(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + + args := createMockStakingScArguments() + stakingAccessAddress := []byte("stakingAccessAddress") + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 4 + args.EnableEpochsHandler = enableEpochsHandler + + argsVMContext := createArgsVMContext() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + args.Eei = eei + + stakingSmartContract, _ := NewStakingSmartContract(args) + + for i := 0; i < 10; i++ { + idxStr := strconv.Itoa(i) + addr := []byte("addr" + idxStr) + doStake(t, stakingSmartContract, stakingAccessAddress, addr, addr) + + if uint64(i) < stakingSmartContract.maxNumNodes { + checkIsStaked(t, stakingSmartContract, addr, addr, vmcommon.Ok) + } 
else { + checkIsStaked(t, stakingSmartContract, addr, addr, vmcommon.UserError) + require.True(t, strings.Contains(eei.returnMessage, "staking is full")) + eei.returnMessage = "" + } + } + requireRegisteredNodes(t, stakingSmartContract, eei, 4, 6) + + doUnStake(t, stakingSmartContract, stakingAccessAddress, []byte("addr0"), []byte("addr0"), vmcommon.Ok) + requireRegisteredNodes(t, stakingSmartContract, eei, 4, 5) + + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + for i := 5; i < 10; i++ { + idxStr := strconv.Itoa(i) + addr := []byte("addr" + idxStr) + err := stakingSmartContract.removeFromWaitingList(addr) + require.Nil(t, err) + } + + for i := 10; i < 20; i++ { + idxStr := strconv.Itoa(i) + addr := []byte("addr" + idxStr) + doStake(t, stakingSmartContract, stakingAccessAddress, addr, addr) + checkIsStaked(t, stakingSmartContract, addr, addr, vmcommon.Ok) + } + requireRegisteredNodes(t, stakingSmartContract, eei, 14, 0) + + doUnStake(t, stakingSmartContract, stakingAccessAddress, []byte("addr10"), []byte("addr10"), vmcommon.Ok) + requireRegisteredNodes(t, stakingSmartContract, eei, 13, 0) +} + +func TestStakingSc_UnStakeNodeFromWaitingListAfterStakingV4ShouldError(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + + args := createMockStakingScArguments() + stakingAccessAddress := []byte("stakingAccessAddress") + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 2 + args.EnableEpochsHandler = enableEpochsHandler + + argsVMContext := createArgsVMContext() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + args.Eei = eei + + stakingSmartContract, _ := NewStakingSmartContract(args) + + doStake(t, stakingSmartContract, stakingAccessAddress, []byte("address0"), []byte("address0")) + doStake(t, stakingSmartContract, stakingAccessAddress, []byte("address1"), []byte("address1")) + doStake(t, stakingSmartContract, stakingAccessAddress, []byte("address2"), []byte("address2")) + requireRegisteredNodes(t, stakingSmartContract, eei, 2, 1) + + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + eei.returnMessage = "" + doUnStake(t, stakingSmartContract, stakingAccessAddress, []byte("address2"), []byte("address2"), vmcommon.ExecutionFailed) + require.Equal(t, eei.returnMessage, vm.ErrWaitingListDisabled.Error()) +} + func TestStakingSc_StakeWithV1ShouldWork(t *testing.T) { t.Parallel() @@ -1161,14 +1263,7 @@ func TestStakingSc_ExecuteStakeStakeJailAndSwitch(t *testing.T) { _ = json.Unmarshal(marshaledData, stakedData) assert.True(t, stakedData.Jailed) assert.True(t, stakedData.Staked) - - arguments.Function = "getTotalNumberOfRegisteredNodes" - arguments.Arguments = [][]byte{} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) - - lastOutput := eei.output[len(eei.output)-1] - assert.Equal(t, lastOutput, []byte{2}) + requireTotalNumberOfRegisteredNodes(t, stakingSmartContract, eei, big.NewInt(2)) } func TestStakingSc_ExecuteStakeStakeJailAndSwitchWithBoundaries(t *testing.T) { @@ -1305,14 +1400,7 @@ func TestStakingSc_ExecuteStakeStakeJailAndSwitchWithBoundaries(t *testing.T) { _ = json.Unmarshal(marshaledData, stakedData) assert.Equal(t, tt.shouldBeJailed, stakedData.Jailed) assert.Equal(t, tt.shouldBeStaked, stakedData.Staked) - - arguments.Function = "getTotalNumberOfRegisteredNodes" - arguments.Arguments = [][]byte{} - 
retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, vmcommon.Ok, retCode) - - lastOutput := eei.output[len(eei.output)-1] - assert.Equal(t, []byte{byte(tt.remainingStakedNodesNumber)}, lastOutput) + requireTotalNumberOfRegisteredNodes(t, stakingSmartContract, eei, big.NewInt(int64(tt.remainingStakedNodesNumber))) }) } } @@ -1447,14 +1535,7 @@ func TestStakingSc_ExecuteStakeStakeStakeJailJailUnJailTwice(t *testing.T) { doGetWaitingListSize(t, stakingSmartContract, eei, 2) outPut = doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) assert.Equal(t, 6, len(outPut)) - - arguments.Function = "getTotalNumberOfRegisteredNodes" - arguments.Arguments = [][]byte{} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) - - lastOutput := eei.output[len(eei.output)-1] - assert.Equal(t, lastOutput, []byte{4}) + requireTotalNumberOfRegisteredNodes(t, stakingSmartContract, eei, big.NewInt(4)) } func TestStakingSc_ExecuteStakeUnStakeJailCombinations(t *testing.T) { @@ -3143,7 +3224,7 @@ func doGetStatus(t *testing.T, sc *stakingSC, eei *vmContext, blsKey []byte, exp assert.Equal(t, vmcommon.Ok, retCode) lastOutput := eei.output[len(eei.output)-1] - assert.True(t, bytes.Equal(lastOutput, []byte(expectedStatus))) + assert.Equal(t, expectedStatus, string(lastOutput)) } func doGetWaitingListSize(t *testing.T, sc *stakingSC, eei *vmContext, expectedSize int) { @@ -3337,6 +3418,150 @@ func TestStakingSc_fixMissingNodeAddOneNodeOnly(t *testing.T) { assert.Equal(t, waitingListData.blsKeys[0], blsKey) } +func TestStakingSC_StakingV4Flags(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakeFlag) + enableEpochsHandler.AddActiveFlags(common.CorrectLastUnJailedFlag) + enableEpochsHandler.AddActiveFlags(common.CorrectFirstQueuedFlag) + enableEpochsHandler.AddActiveFlags(common.CorrectJailedNotUnStakedEmptyQueueFlag) + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + + argsVMContext := createArgsVMContext() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + + args := createMockStakingScArguments() + args.Eei = eei + args.EnableEpochsHandler = enableEpochsHandler + stakingSmartContract, _ := NewStakingSmartContract(args) + + // Functions which are not allowed starting STAKING V4 INIT + arguments := CreateVmContractCallInput() + arguments.Function = "getQueueIndex" + retCode := stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "getQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "fixWaitingListQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "addMissingNodeToQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, 
vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + // Functions which are allowed to be called by systemSC at the end of the epoch in epoch = STAKING V4 INIT + eei.CleanCache() + arguments.Function = "switchJailedWithWaiting" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "function not allowed to be called by address")) + + eei.CleanCache() + arguments.Function = "resetLastUnJailedFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "can be called by endOfEpochAccess address only")) + + eei.CleanCache() + arguments.Function = "stakeNodesFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "can be called by endOfEpochAccess address only")) + + eei.CleanCache() + arguments.Function = "cleanAdditionalQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "can be called by endOfEpochAccess address only")) + + enableEpochsHandler.RemoveActiveFlags(common.StakingV4Step1Flag) + // All functions from above are not allowed anymore starting STAKING V4 epoch + eei.CleanCache() + arguments.Function = "getQueueIndex" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "getQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "fixWaitingListQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "addMissingNodeToQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "switchJailedWithWaiting" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "resetLastUnJailedFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "stakeNodesFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "cleanAdditionalQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) +} + +func requireRegisteredNodes(t *testing.T, stakingSC *stakingSC, eei *vmContext, stakedNodes int64, waitingListNodes uint32) { + stakeConfig := stakingSC.getConfig() + waitingList, _ := stakingSC.getWaitingListHead() + 
require.Equal(t, stakedNodes, stakeConfig.StakedNodes) + require.Equal(t, waitingListNodes, waitingList.Length) + + requireTotalNumberOfRegisteredNodes(t, stakingSC, eei, big.NewInt(stakedNodes+int64(waitingListNodes))) +} + +func requireTotalNumberOfRegisteredNodes(t *testing.T, stakingSC *stakingSC, eei *vmContext, expectedRegisteredNodes *big.Int) { + arguments := CreateVmContractCallInput() + arguments.Function = "getTotalNumberOfRegisteredNodes" + arguments.Arguments = [][]byte{} + + retCode := stakingSC.Execute(arguments) + lastOutput := eei.output[len(eei.output)-1] + noOfRegisteredNodes := big.NewInt(0).SetBytes(lastOutput) + require.Equal(t, retCode, vmcommon.Ok) + require.Equal(t, expectedRegisteredNodes, noOfRegisteredNodes) +} + func TestStakingSc_fixMissingNodeAddAsLast(t *testing.T) { t.Parallel() @@ -3366,3 +3591,229 @@ func TestStakingSc_fixMissingNodeAddAsLast(t *testing.T) { assert.Equal(t, len(waitingListData.blsKeys), 4) assert.Equal(t, waitingListData.blsKeys[3], blsKey) } + +func TestStakingSC_UnStakeAllFromQueueErrors(t *testing.T) { + t.Parallel() + + blockChainHook := &mock.BlockChainHookStub{} + blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { + return nil, 0, nil + } + + eei := createDefaultEei() + eei.blockChainHook = blockChainHook + eei.SetSCAddress([]byte("addr")) + + stakingAccessAddress := vm.ValidatorSCAddress + args := createMockStakingScArguments() + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 1 + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + + args.Eei = eei + args.StakingSCConfig.UnBondPeriod = 100 + sc, _ := NewStakingSmartContract(args) + + vmInput := CreateVmContractCallInput() + vmInput.Function = "unStakeAllNodesFromQueue" + + returnCode := sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.UserError) + require.Equal(t, eei.returnMessage, "invalid method to call") + + eei.returnMessage = "" + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + returnCode = sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.UserError) + require.Equal(t, eei.returnMessage, "stake nodes from waiting list can be called by endOfEpochAccess address only") + + eei.returnMessage = "" + vmInput.CallerAddr = []byte("endOfEpoch") + vmInput.Arguments = [][]byte{{1}} + returnCode = sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.UserError) + require.Equal(t, "number of arguments must be equal to 0", eei.returnMessage) + + vmInput.Arguments = [][]byte{} + returnCode = sc.Execute(vmInput) + require.Equal(t, returnCode, vmcommon.Ok) +} + +func TestStakingSc_UnStakeAllFromQueue(t *testing.T) { + t.Parallel() + + blockChainHook := &mock.BlockChainHookStub{} + blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { + return nil, 0, nil + } + + eei := createDefaultEei() + eei.blockChainHook = blockChainHook + eei.SetSCAddress([]byte("addr")) + + stakingAccessAddress := vm.ValidatorSCAddress + args := createMockStakingScArguments() + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 1 + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + args.Eei = eei + args.StakingSCConfig.UnBondPeriod = 100 + stakingSmartContract, _ := NewStakingSmartContract(args) + + stakerAddress := []byte("stakerAddr") + + blockChainHook.CurrentNonceCalled = 
func() uint64 { + return 1 + } + + // do stake should work + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("firstKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("secondKey")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + waitingReturn := doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) + assert.Equal(t, len(waitingReturn), 9) + + arguments := CreateVmContractCallInput() + validatorData := &ValidatorDataV2{ + TotalStakeValue: big.NewInt(400), + TotalUnstaked: big.NewInt(0), + RewardAddress: stakerAddress, + BlsPubKeys: [][]byte{[]byte("firstKey "), []byte("secondKey"), []byte("thirdKey "), []byte("fourthKey")}, + } + arguments.CallerAddr = stakingSmartContract.endOfEpochAccessAddr + marshaledData, _ := stakingSmartContract.marshalizer.Marshal(validatorData) + eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) + + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + arguments.Function = "unStakeAllNodesFromQueue" + retCode := stakingSmartContract.Execute(arguments) + assert.Equal(t, retCode, vmcommon.Ok) + + assert.Equal(t, len(eei.GetStorage([]byte(waitingListHeadKey))), 0) + newHead, _ := stakingSmartContract.getWaitingListHead() + assert.Equal(t, uint32(0), newHead.Length) // no entries in the queue list + + doGetStatus(t, stakingSmartContract, eei, []byte("secondKey"), "unStaked") + + // stake them again - as they were deleted from waiting list + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + // surprisingly, the queue works again as we did not activate the staking v4 + doGetStatus(t, stakingSmartContract, eei, []byte("thirdKey "), "staked") + doGetStatus(t, stakingSmartContract, eei, []byte("fourthKey"), "staked") +} + +func TestStakingSc_UnStakeAllFromQueueWithDelegationContracts(t *testing.T) { + t.Parallel() + + blockChainHook := &mock.BlockChainHookStub{} + blockChainHook.GetStorageDataCalled = func(accountsAddress []byte, index []byte) ([]byte, uint32, error) { + return nil, 0, nil + } + blockChainHook.GetShardOfAddressCalled = func(address []byte) uint32 { + return core.MetachainShardId + } + + eei := createDefaultEei() + eei.blockChainHook = blockChainHook + eei.SetSCAddress([]byte("addr")) + + delegationSC, _ := createDelegationContractAndEEI() + delegationSC.eei = eei + + systemSCContainerStub := &mock.SystemSCContainerStub{GetCalled: func(key []byte) (vm.SystemSmartContract, error) { + return delegationSC, nil + }} + _ = eei.SetSystemSCContainer(systemSCContainerStub) + + stakingAccessAddress := vm.ValidatorSCAddress + args := createMockStakingScArguments() + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 1 + enableEpochsHandler, _ := args.EnableEpochsHandler.(*enableEpochsHandlerMock.EnableEpochsHandlerStub) + args.Eei = eei + args.StakingSCConfig.UnBondPeriod = 100 + stakingSmartContract, _ := NewStakingSmartContract(args) + + stakerAddress := []byte("stakerAddr") + + blockChainHook.CurrentNonceCalled = func() uint64 { + return 1 + } + + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + enableEpochsHandler.AddActiveFlags(common.StakeFlag) + 
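With MaxNumberOfNodesForStake set to 1 in both queue tests, only the first doStake call below activates a node; the other three keys land in the waiting list, which is what unStakeAllNodesFromQueue later drains (the waiting list is checked to contain secondKey, thirdKey and fourthKey). Node counts in these tests come back from the contract as raw big-endian bytes and are decoded with big.Int.SetBytes, as in requireTotalNumberOfRegisteredNodes above. A minimal decode sketch with an illustrative output value:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// getTotalNumberOfRegisteredNodes pushes the count onto the VM output as
	// big-endian bytes; a single 0x04 byte therefore decodes to 4 nodes.
	lastOutput := []byte{4}
	fmt.Println(big.NewInt(0).SetBytes(lastOutput)) // 4
}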
+ // do stake should work + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("firstKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("secondKey")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + waitingReturn := doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) + requireSliceContains(t, waitingReturn, [][]byte{[]byte("secondKey"), []byte("thirdKey "), []byte("fourthKey")}) + + dStatus := &DelegationContractStatus{ + StakedKeys: make([]*NodesData, 4), + NotStakedKeys: nil, + UnStakedKeys: nil, + NumUsers: 0, + } + dStatus.StakedKeys[0] = &NodesData{BLSKey: []byte("firstKey ")} + dStatus.StakedKeys[1] = &NodesData{BLSKey: []byte("secondKey")} + dStatus.StakedKeys[2] = &NodesData{BLSKey: []byte("thirdKey ")} + dStatus.StakedKeys[3] = &NodesData{BLSKey: []byte("fourthKey")} + + marshaledData, _ := delegationSC.marshalizer.Marshal(dStatus) + eei.SetStorageForAddress(stakerAddress, []byte(delegationStatusKey), marshaledData) + + arguments := CreateVmContractCallInput() + arguments.RecipientAddr = vm.StakingSCAddress + validatorData := &ValidatorDataV2{ + TotalStakeValue: big.NewInt(400), + TotalUnstaked: big.NewInt(0), + RewardAddress: stakerAddress, + BlsPubKeys: [][]byte{[]byte("firstKey "), []byte("secondKey"), []byte("thirdKey "), []byte("fourthKey")}, + } + arguments.CallerAddr = stakingSmartContract.endOfEpochAccessAddr + marshaledData, _ = stakingSmartContract.marshalizer.Marshal(validatorData) + eei.SetStorageForAddress(vm.ValidatorSCAddress, stakerAddress, marshaledData) + + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + + arguments.Function = "unStakeAllNodesFromQueue" + retCode := stakingSmartContract.Execute(arguments) + fmt.Println(eei.returnMessage) + assert.Equal(t, retCode, vmcommon.Ok) + + assert.Equal(t, len(eei.GetStorage([]byte(waitingListHeadKey))), 0) + newHead, _ := stakingSmartContract.getWaitingListHead() + assert.Equal(t, uint32(0), newHead.Length) // no entries in the queue list + + marshaledData = eei.GetStorageFromAddress(stakerAddress, []byte(delegationStatusKey)) + _ = stakingSmartContract.marshalizer.Unmarshal(dStatus, marshaledData) + assert.Equal(t, len(dStatus.UnStakedKeys), 3) + assert.Equal(t, len(dStatus.StakedKeys), 1) + + doGetStatus(t, stakingSmartContract, eei, []byte("secondKey"), "unStaked") + doGetStatus(t, stakingSmartContract, eei, []byte("thirdKey "), "unStaked") + doGetStatus(t, stakingSmartContract, eei, []byte("fourthKey"), "unStaked") + + // stake them again - as they were deleted from waiting list + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("thirdKey ")) + doStake(t, stakingSmartContract, stakingAccessAddress, stakerAddress, []byte("fourthKey")) + + doGetStatus(t, stakingSmartContract, eei, []byte("thirdKey "), "staked") + doGetStatus(t, stakingSmartContract, eei, []byte("fourthKey"), "staked") +} + +func requireSliceContains(t *testing.T, s1, s2 [][]byte) { + for _, elemInS2 := range s2 { + require.Contains(t, s1, elemInS2) + } +} diff --git a/vm/systemSmartContracts/validator.go b/vm/systemSmartContracts/validator.go index 86350b5ef34..37799ccc447 100644 --- a/vm/systemSmartContracts/validator.go +++ b/vm/systemSmartContracts/validator.go @@ -21,6 +21,8 @@ import ( const unJailedFunds = 
"unJailFunds" const unStakeUnBondPauseKey = "unStakeUnBondPause" +const minPercentage = 0.0001 +const numberOfNodesTooHigh = "number of nodes too high, no new nodes activated" var zero = big.NewInt(0) @@ -51,6 +53,9 @@ type validatorSC struct { governanceSCAddress []byte shardCoordinator sharding.Coordinator enableEpochsHandler common.EnableEpochsHandler + nodesCoordinator vm.NodesCoordinator + totalStakeLimit *big.Int + nodeLimitPercentage float64 } // ArgsValidatorSmartContract is the arguments structure to create a new ValidatorSmartContract @@ -69,6 +74,7 @@ type ArgsValidatorSmartContract struct { GovernanceSCAddress []byte ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinator vm.NodesCoordinator } // NewValidatorSmartContract creates an validator smart contract @@ -120,6 +126,15 @@ func NewValidatorSmartContract( if err != nil { return nil, err } + if check.IfNil(args.NodesCoordinator) { + return nil, fmt.Errorf("%w in validatorSC", vm.ErrNilNodesCoordinator) + } + if args.StakingSCConfig.NodeLimitPercentage < minPercentage { + return nil, fmt.Errorf("%w in validatorSC", vm.ErrInvalidNodeLimitPercentage) + } + if args.StakingSCConfig.StakeLimitPercentage < minPercentage { + return nil, fmt.Errorf("%w in validatorSC", vm.ErrInvalidStakeLimitPercentage) + } baseConfig := ValidatorConfig{ TotalSupply: big.NewInt(0).Set(args.GenesisTotalSupply), @@ -151,7 +166,7 @@ func NewValidatorSmartContract( return nil, vm.ErrInvalidMinCreationDeposit } - return &validatorSC{ + reg := &validatorSC{ eei: args.Eei, unBondPeriod: args.StakingSCConfig.UnBondPeriod, unBondPeriodInEpochs: args.StakingSCConfig.UnBondPeriodInEpochs, @@ -169,7 +184,16 @@ func NewValidatorSmartContract( governanceSCAddress: args.GovernanceSCAddress, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, - }, nil + nodeLimitPercentage: args.StakingSCConfig.NodeLimitPercentage, + nodesCoordinator: args.NodesCoordinator, + } + + reg.totalStakeLimit = core.GetIntTrimmedPercentageOfValue(args.GenesisTotalSupply, args.StakingSCConfig.StakeLimitPercentage) + if reg.totalStakeLimit.Cmp(baseConfig.NodePrice) < 0 { + return nil, fmt.Errorf("%w, value is %f", vm.ErrInvalidStakeLimitPercentage, args.StakingSCConfig.StakeLimitPercentage) + } + + return reg, nil } // Execute calls one of the functions from the validator smart contract and runs the code according to the input @@ -388,11 +412,7 @@ func (v *validatorSC) unJail(args *vmcommon.ContractCallInput) vmcommon.ReturnCo } if transferBack.Cmp(zero) > 0 { - err = v.eei.Transfer(args.CallerAddr, args.RecipientAddr, transferBack, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unJail function") - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, transferBack, nil, 0) } finalUnJailFunds := big.NewInt(0).Sub(args.CallValue, transferBack) @@ -628,7 +648,12 @@ func (v *validatorSC) registerBLSKeys( return nil, nil, err } + newlyAddedKeys := make([][]byte, 0) for _, blsKey := range newKeys { + if v.isNumberOfNodesTooHigh(len(registrationData.BlsPubKeys) + 1) { + break + } + vmOutput, errExec := v.executeOnStakingSC([]byte("register@" + hex.EncodeToString(blsKey) + "@" + hex.EncodeToString(registrationData.RewardAddress) + "@" + @@ -649,9 +674,10 @@ func (v *validatorSC) registerBLSKeys( } registrationData.BlsPubKeys = append(registrationData.BlsPubKeys, blsKey) + newlyAddedKeys = append(newlyAddedKeys, blsKey) } - return blsKeys, newKeys, nil + return 
blsKeys, newlyAddedKeys, nil } func (v *validatorSC) updateStakeValue(registrationData *ValidatorDataV2, caller []byte) vmcommon.ReturnCode { @@ -796,6 +822,11 @@ func (v *validatorSC) reStakeUnStakedNodes(args *vmcommon.ContractCallInput) vmc return vmcommon.UserError } + if v.isNumberOfNodesTooHigh(len(registrationData.BlsPubKeys)) { + v.eei.AddReturnMessage("number of nodes is too high") + return vmcommon.UserError + } + numQualified := big.NewInt(0).Div(registrationData.TotalStakeValue, validatorConfig.NodePrice) if uint64(len(args.Arguments)) > numQualified.Uint64() { v.eei.AddReturnMessage("insufficient funds") @@ -898,6 +929,27 @@ func (v *validatorSC) checkAllGivenKeysAreUnStaked(registrationData *ValidatorDa return mapBlsKeys, nil } +func (v *validatorSC) isStakeTooHigh(registrationData *ValidatorDataV2) bool { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag) { + return false + } + + return registrationData.TotalStakeValue.Cmp(v.totalStakeLimit) > 0 +} + +func (v *validatorSC) isNumberOfNodesTooHigh(numNodes int) bool { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag) { + return false + } + + return numNodes > v.computeNodeLimit() +} + +func (v *validatorSC) computeNodeLimit() int { + nodeLimit := float64(v.nodesCoordinator.GetNumTotalEligible()) * v.nodeLimitPercentage + return int(nodeLimit) +} + func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { err := v.eei.UseGas(v.gasCost.MetaChainSystemSCsCost.Stake) if err != nil { @@ -931,6 +983,11 @@ func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return vmcommon.UserError } + if v.isStakeTooHigh(registrationData) { + v.eei.AddReturnMessage("total stake limit reached") + return vmcommon.UserError + } + lenArgs := len(args.Arguments) if lenArgs == 0 { return v.updateStakeValue(registrationData, args.CallerAddr) @@ -1018,31 +1075,73 @@ func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod } } - v.activateStakingFor( + v.activateNewBLSKeys(registrationData, blsKeys, newKeys, &validatorConfig, args) + + err = v.saveRegistrationData(args.CallerAddr, registrationData) + if err != nil { + v.eei.AddReturnMessage("cannot save registration data: error " + err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (v *validatorSC) activateNewBLSKeys( + registrationData *ValidatorDataV2, + blsKeys [][]byte, + newKeys [][]byte, + validatorConfig *ValidatorConfig, + args *vmcommon.ContractCallInput, +) { + numRegisteredBlsKeys := len(registrationData.BlsPubKeys) + allNodesActivated := v.activateStakingFor( blsKeys, + newKeys, registrationData, validatorConfig.NodePrice, registrationData.RewardAddress, args.CallerAddr, ) - err = v.saveRegistrationData(args.CallerAddr, registrationData) - if err != nil { - v.eei.AddReturnMessage("cannot save registration data: error " + err.Error()) - return vmcommon.UserError + if !allNodesActivated && len(blsKeys) > 0 { + nodeLimit := int64(v.computeNodeLimit()) + entry := &vmcommon.LogEntry{ + Identifier: []byte(args.Function), + Address: args.RecipientAddr, + Topics: [][]byte{ + []byte(numberOfNodesTooHigh), + big.NewInt(int64(numRegisteredBlsKeys)).Bytes(), + big.NewInt(nodeLimit).Bytes(), + }, + } + v.eei.AddLogEntry(entry) } - return vmcommon.Ok } func (v *validatorSC) activateStakingFor( blsKeys [][]byte, + newKeys [][]byte, registrationData *ValidatorDataV2, fixedStakeValue *big.Int, rewardAddress []byte, ownerAddress []byte, -) { - numRegistered := 
uint64(registrationData.NumRegistered) +) bool { + numActivatedKey := uint64(registrationData.NumRegistered) + + numAllBLSKeys := len(registrationData.BlsPubKeys) + if v.isNumberOfNodesTooHigh(numAllBLSKeys) { + return false + } + + maxNumNodesToActivate := len(blsKeys) + if v.enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag) { + maxNumNodesToActivate = v.computeNodeLimit() - numAllBLSKeys + len(newKeys) + } + nodesActivated := 0 + if nodesActivated >= maxNumNodesToActivate && len(blsKeys) >= maxNumNodesToActivate { + return false + } for i := uint64(0); i < uint64(len(blsKeys)); i++ { currentBLSKey := blsKeys[i] @@ -1061,12 +1160,19 @@ func (v *validatorSC) activateStakingFor( } if stakedData.UnStakedNonce == 0 { - numRegistered++ + numActivatedKey++ + } + + nodesActivated++ + if nodesActivated >= maxNumNodesToActivate { + break } } - registrationData.NumRegistered = uint32(numRegistered) - registrationData.LockedStake.Mul(fixedStakeValue, big.NewInt(0).SetUint64(numRegistered)) + registrationData.NumRegistered = uint32(numActivatedKey) + registrationData.LockedStake.Mul(fixedStakeValue, big.NewInt(0).SetUint64(numActivatedKey)) + + return nodesActivated < maxNumNodesToActivate || len(blsKeys) <= maxNumNodesToActivate } func (v *validatorSC) stakeOneNode( @@ -1370,11 +1476,7 @@ func (v *validatorSC) unBondV1(args *vmcommon.ContractCallInput) vmcommon.Return } } - err := v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unBond function") - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) return vmcommon.Ok } @@ -1409,11 +1511,7 @@ func (v *validatorSC) unBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCo return returnCode } - err := v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unBond function") - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) return vmcommon.Ok } @@ -1500,11 +1598,7 @@ func (v *validatorSC) claim(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return vmcommon.UserError } - err = v.eei.Transfer(args.CallerAddr, args.RecipientAddr, claimable, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on finalizeUnStake function: error " + err.Error()) - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, claimable, nil, 0) return vmcommon.Ok } @@ -1703,12 +1797,7 @@ func (v *validatorSC) unBondTokens(args *vmcommon.ContractCallInput) vmcommon.Re return vmcommon.UserError } - err = v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unBond function") - return vmcommon.UserError - } - + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) err = v.saveRegistrationData(args.CallerAddr, registrationData) if err != nil { v.eei.AddReturnMessage("cannot save registration data: error " + err.Error()) @@ -2027,6 +2116,16 @@ func (v *validatorSC) mergeValidatorData(args *vmcommon.ContractCallInput) vmcom validatorConfig := v.getConfig(v.eei.BlockChainHook().CurrentEpoch()) finalValidatorData.LockedStake.Mul(validatorConfig.NodePrice, big.NewInt(int64(finalValidatorData.NumRegistered))) + if v.isNumberOfNodesTooHigh(len(finalValidatorData.BlsPubKeys)) { + v.eei.AddReturnMessage("number of nodes is too high") + return vmcommon.UserError + } + + 
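The guard above (and the same check in stake and reStakeUnStakedNodes) compares the number of registered BLS keys against computeNodeLimit, which is simply the eligible-validator count reported by the nodes coordinator, scaled by NodeLimitPercentage and truncated to an integer. A standalone restatement of that arithmetic, with sample numbers taken from the unit tests (the helper name is illustrative, not from the patch):

package main

import "fmt"

// nodeLimit mirrors validatorSC.computeNodeLimit: eligible validators reported by the
// nodes coordinator, scaled by the configured percentage and truncated toward zero.
func nodeLimit(numTotalEligible uint64, nodeLimitPercentage float64) int {
	return int(float64(numTotalEligible) * nodeLimitPercentage)
}

func main() {
	// With 1000 eligible nodes and NodeLimitPercentage = 0.005, at most 5 BLS keys
	// per validator may be registered and activated.
	fmt.Println(nodeLimit(1000, 0.005)) // 5
}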
if v.isStakeTooHigh(finalValidatorData) { + v.eei.AddReturnMessage("total stake limit reached") + return vmcommon.UserError + } + v.eei.SetStorage(oldAddress, nil) err = v.saveRegistrationData(delegationAddr, finalValidatorData) if err != nil { diff --git a/vm/systemSmartContracts/validator_test.go b/vm/systemSmartContracts/validator_test.go index f4aefd377ec..758e0167a9d 100644 --- a/vm/systemSmartContracts/validator_test.go +++ b/vm/systemSmartContracts/validator_test.go @@ -51,6 +51,8 @@ func createMockArgumentsForValidatorSCWithSystemScAddresses( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, Marshalizer: &mock.MarshalizerMock{}, GenesisTotalSupply: big.NewInt(100000000), @@ -64,7 +66,9 @@ func createMockArgumentsForValidatorSCWithSystemScAddresses( common.ValidatorToDelegationFlag, common.DoubleKeyProtectionFlag, common.MultiClaimOnDelegationFlag, + common.StakeLimitsFlag, ), + NodesCoordinator: &mock.NodesCoordinatorStub{}, } return args @@ -224,6 +228,39 @@ func TestNewStakingValidatorSmartContract_NilValidatorSmartContractAddress(t *te assert.True(t, errors.Is(err, vm.ErrNilValidatorSmartContractAddress)) } +func TestNewStakingValidatorSmartContract_NilNodesCoordinator(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.NodesCoordinator = nil + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, vm.ErrNilNodesCoordinator)) +} + +func TestNewStakingValidatorSmartContract_ZeroStakeLimit(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.StakingSCConfig.StakeLimitPercentage = 0.0 + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, vm.ErrInvalidStakeLimitPercentage)) +} + +func TestNewStakingValidatorSmartContract_ZeroNodeLimit(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.StakingSCConfig.NodeLimitPercentage = 0.0 + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, vm.ErrInvalidNodeLimitPercentage)) +} + func TestNewStakingValidatorSmartContract_NilSigVerifier(t *testing.T) { t.Parallel() @@ -368,6 +405,138 @@ func TestStakingValidatorSC_ExecuteStakeWithoutArgumentsShouldWork(t *testing.T) assert.Equal(t, vmcommon.Ok, errCode) } +func TestStakingValidatorSC_ExecuteStakeTooMuchStake(t *testing.T) { + t.Parallel() + + arguments := CreateVmContractCallInput() + validatorData := createAValidatorData(25000000, 2, 12500000) + validatorDataBytes, _ := json.Marshal(&validatorData) + + eei := &mock.SystemEIStub{} + eei.GetStorageCalled = func(key []byte) []byte { + if bytes.Equal(key, arguments.CallerAddr) { + return validatorDataBytes + } + return nil + } + eei.AddReturnMessageCalled = func(msg string) { + assert.Equal(t, msg, "total stake limit reached") + } + + args := createMockArgumentsForValidatorSC() + args.Eei = eei + + stakingValidatorSc, _ := NewValidatorSmartContract(args) + + arguments.Function = "stake" + arguments.CallValue = big.NewInt(0).Set(stakingValidatorSc.totalStakeLimit) + + errCode := stakingValidatorSc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, errCode) +} + +func TestStakingValidatorSC_ExecuteStakeTooManyNodes(t *testing.T) { + t.Parallel() + + arguments := CreateVmContractCallInput() + + eei := &mock.SystemEIStub{} + + args := 
createMockArgumentsForValidatorSC() + args.Eei = eei + + args.NodesCoordinator = &mock.NodesCoordinatorStub{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }} + args.StakingSCConfig.NodeLimitPercentage = 0.005 + stakingValidatorSc, _ := NewValidatorSmartContract(args) + + validatorData := createAValidatorData(75000000, 5, 12500000) + validatorDataBytes, _ := json.Marshal(&validatorData) + + eei.GetStorageCalled = func(key []byte) []byte { + if bytes.Equal(key, arguments.CallerAddr) { + return validatorDataBytes + } + return nil + } + called := false + eei.AddLogEntryCalled = func(entry *vmcommon.LogEntry) { + called = true + assert.Equal(t, entry.Topics[0], []byte(numberOfNodesTooHigh)) + } + + eei.ExecuteOnDestContextCalled = func(destination, sender []byte, value *big.Int, input []byte) (*vmcommon.VMOutput, error) { + if strings.Contains(string(input), "stake") { + assert.Fail(t, "should not stake nodes") + } + return &vmcommon.VMOutput{}, nil + } + + key1 := []byte("Key1") + key2 := []byte("Key2") + key3 := []byte("Key3") + arguments.Function = "stake" + arguments.CallValue = big.NewInt(0).Mul(big.NewInt(3), big.NewInt(10000000)) + arguments.Arguments = [][]byte{big.NewInt(3).Bytes(), key1, []byte("msg1"), key2, []byte("msg2"), key3, []byte("msg3")} + + errCode := stakingValidatorSc.Execute(arguments) + assert.Equal(t, vmcommon.Ok, errCode) + assert.True(t, called) +} + +func TestStakingValidatorSC_ExecuteStakeTooManyNodesAddOnly2(t *testing.T) { + t.Parallel() + + arguments := CreateVmContractCallInput() + + eei := &mock.SystemEIStub{} + + args := createMockArgumentsForValidatorSC() + args.Eei = eei + + args.NodesCoordinator = &mock.NodesCoordinatorStub{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }} + args.StakingSCConfig.NodeLimitPercentage = 0.005 + stakingValidatorSc, _ := NewValidatorSmartContract(args) + + validatorData := createAValidatorData(75000000, 3, 12500000) + validatorDataBytes, _ := json.Marshal(&validatorData) + + eei.GetStorageCalled = func(key []byte) []byte { + if bytes.Equal(key, arguments.CallerAddr) { + return validatorDataBytes + } + return nil + } + called := false + eei.AddLogEntryCalled = func(entry *vmcommon.LogEntry) { + called = true + assert.Equal(t, entry.Topics[0], []byte(numberOfNodesTooHigh)) + } + + stakeCalledInStakingSC := 0 + eei.ExecuteOnDestContextCalled = func(destination, sender []byte, value *big.Int, input []byte) (*vmcommon.VMOutput, error) { + if strings.Contains(string(input), "stake") { + stakeCalledInStakingSC++ + } + return &vmcommon.VMOutput{}, nil + } + + key1 := []byte("Key1") + key2 := []byte("Key2") + key3 := []byte("Key3") + arguments.Function = "stake" + arguments.CallValue = big.NewInt(0).Mul(big.NewInt(3), big.NewInt(10000000)) + arguments.Arguments = [][]byte{big.NewInt(3).Bytes(), key1, []byte("msg1"), key2, []byte("msg2"), key3, []byte("msg3")} + + errCode := stakingValidatorSc.Execute(arguments) + assert.Equal(t, vmcommon.Ok, errCode) + assert.True(t, called) + assert.Equal(t, 2, stakeCalledInStakingSC) +} + func TestStakingValidatorSC_ExecuteStakeAddedNewPubKeysShouldWork(t *testing.T) { t.Parallel() @@ -1239,6 +1408,8 @@ func TestStakingValidatorSC_StakeUnStake3XRestake2(t *testing.T) { return stakingSc, nil }}) + nodesCoordinator := &mock.NodesCoordinatorStub{} + args.NodesCoordinator = nodesCoordinator args.StakingSCConfig = argsStaking.StakingSCConfig args.Eei = eei @@ -1282,9 +1453,21 @@ func TestStakingValidatorSC_StakeUnStake3XRestake2(t *testing.T) { retCode = sc.Execute(arguments) 
assert.Equal(t, vmcommon.Ok, retCode) + nodesCoordinator.GetNumTotalEligibleCalled = func() uint64 { + return 1 + } + arguments.Function = "reStakeUnStakedNodes" arguments.Arguments = [][]byte{stakerPubKey1, stakerPubKey2} arguments.CallValue = big.NewInt(0) + retCode = sc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, retCode) + assert.Equal(t, eei.returnMessage, "number of nodes is too high") + + nodesCoordinator.GetNumTotalEligibleCalled = func() uint64 { + return 10 + } + retCode = sc.Execute(arguments) assert.Equal(t, vmcommon.Ok, retCode) } @@ -5104,6 +5287,101 @@ func TestStakingValidatorSC_MergeValidatorData(t *testing.T) { assert.Equal(t, stakedData.RewardAddress, vm.FirstDelegationSCAddress) } +func TestStakingValidatorSC_MergeValidatorDataTooMuchStake(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + argsVMContext := createArgsVMContext() + argsVMContext.InputParser = parsers.NewCallArgsParser() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + + argsStaking := createMockStakingScArguments() + argsStaking.Eei = eei + stakingSc, _ := NewStakingSmartContract(argsStaking) + eei.SetSCAddress([]byte("addr")) + _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { + return stakingSc, nil + }}) + + args := createMockArgumentsForValidatorSC() + args.StakingSCConfig = argsStaking.StakingSCConfig + args.Eei = eei + + sc, _ := NewValidatorSmartContract(args) + arguments := CreateVmContractCallInput() + arguments.CallerAddr = vm.ESDTSCAddress + arguments.Function = "mergeValidatorData" + arguments.Arguments = [][]byte{} + arguments.CallValue = big.NewInt(0) + arguments.CallerAddr = sc.delegationMgrSCAddress + randomAddress := bytes.Repeat([]byte{1}, len(arguments.CallerAddr)) + arguments.Arguments = [][]byte{randomAddress, vm.FirstDelegationSCAddress} + + limitPer4 := big.NewInt(0).Div(sc.totalStakeLimit, big.NewInt(4)) + + stake(t, sc, limitPer4, randomAddress, randomAddress, []byte("firsstKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, randomAddress, randomAddress, []byte("secondKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, randomAddress, randomAddress, []byte("thirddKey"), big.NewInt(1).Bytes()) + + stake(t, sc, limitPer4, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fourthKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fifthhKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("sixthhKey"), big.NewInt(1).Bytes()) + + retCode := sc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, retCode) + assert.Equal(t, eei.returnMessage, "total stake limit reached") +} + +func TestStakingValidatorSC_MergeValidatorDataTooMuchNodes(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + argsVMContext := createArgsVMContext() + argsVMContext.InputParser = parsers.NewCallArgsParser() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + + argsStaking := createMockStakingScArguments() + argsStaking.Eei = eei + stakingSc, _ := NewStakingSmartContract(argsStaking) + eei.SetSCAddress([]byte("addr")) + _ = 
eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { + return stakingSc, nil + }}) + + args := createMockArgumentsForValidatorSC() + args.NodesCoordinator = &mock.NodesCoordinatorStub{GetNumTotalEligibleCalled: func() uint64 { + return 5 + }} + args.StakingSCConfig = argsStaking.StakingSCConfig + args.Eei = eei + + sc, _ := NewValidatorSmartContract(args) + arguments := CreateVmContractCallInput() + arguments.CallerAddr = vm.ESDTSCAddress + arguments.Function = "mergeValidatorData" + arguments.Arguments = [][]byte{} + arguments.CallValue = big.NewInt(0) + arguments.CallerAddr = sc.delegationMgrSCAddress + randomAddress := bytes.Repeat([]byte{1}, len(arguments.CallerAddr)) + arguments.Arguments = [][]byte{randomAddress, vm.FirstDelegationSCAddress} + + stake(t, sc, stakingSc.stakeValue, randomAddress, randomAddress, []byte("firsstKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, randomAddress, randomAddress, []byte("secondKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, randomAddress, randomAddress, []byte("thirddKey"), big.NewInt(1).Bytes()) + + stake(t, sc, stakingSc.stakeValue, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fourthKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fifthhKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("sixthhKey"), big.NewInt(1).Bytes()) + + retCode := sc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, retCode) + assert.Equal(t, eei.returnMessage, "number of nodes is too high") +} + func TestValidatorSC_getMinUnStakeTokensValueFromDelegationManagerMarshalizerFail(t *testing.T) { t.Parallel()
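To make the activation cap in activateStakingFor concrete: when the stake-limits flag is active, the number of keys that may still be activated in a stake call is computeNodeLimit() - numAllBLSKeys + len(newKeys), i.e. however many of the newly registered keys still fit under the per-validator limit; when not every submitted key fits, a numberOfNodesTooHigh log entry is emitted and the call still returns Ok. A small illustrative restatement using the numbers from TestStakingValidatorSC_ExecuteStakeTooManyNodesAddOnly2 (the helper name is not from the patch):

package main

import "fmt"

// maxActivations restates the cap computed in activateStakingFor: nodeLimit is the
// per-validator cap, totalKeys the key count after registerBLSKeys ran (old keys plus
// the newly appended ones), newKeys how many keys registerBLSKeys managed to append.
func maxActivations(nodeLimit, totalKeys, newKeys int) int {
	return nodeLimit - totalKeys + newKeys
}

func main() {
	// In that test: limit 5 (1000 eligible * 0.005), 3 keys already registered, 3 more
	// submitted. registerBLSKeys appends only 2 of them before hitting the limit, so
	// totalKeys becomes 5 and exactly 2 stake calls reach the staking SC.
	fmt.Println(maxActivations(5, 5, 2)) // 2
}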